python_code | repo_name | file_path
---|---|---
print(
'ERROR: stitch_wrapper not yet compiled. Please run `cd /path/to/tensorbox/utils && make`'
)
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/stitch_wrapper.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: AnnoList.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x
) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='AnnoList.proto',
package='protobuf_annolist',
serialized_pb=_b(
'\n\x0e\x41nnoList.proto\x12\x11protobuf_annolist\"B\n\tAttribute\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0b\n\x03val\x18\x02 \x01(\x05\x12\x0c\n\x04\x66val\x18\x03 \x01(\x02\x12\x0e\n\x06strval\x18\x04 \x01(\t\"\"\n\tIdStrPair\x12\n\n\x02id\x18\x01 \x01(\x05\x12\t\n\x01s\x18\x02 \x01(\t\"j\n\rAttributeDesc\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x05\x12\r\n\x05\x64type\x18\x03 \x01(\x05\x12\x30\n\nval_to_str\x18\x04 \x03(\x0b\x32\x1c.protobuf_annolist.IdStrPair\".\n\x11\x41nnoRectAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03val\x18\x02 \x01(\t\"\x98\x01\n\x08\x41nnoRect\x12\n\n\x02x1\x18\x01 \x01(\x02\x12\n\n\x02y1\x18\x02 \x01(\x02\x12\n\n\x02x2\x18\x03 \x01(\x02\x12\n\n\x02y2\x18\x04 \x01(\x02\x12\r\n\x05score\x18\x05 \x01(\x02\x12\n\n\x02id\x18\x06 \x01(\x05\x12\x10\n\x08track_id\x18\x0b \x01(\x05\x12/\n\tattribute\x18\x0c \x03(\x0b\x32\x1c.protobuf_annolist.Attribute\"o\n\nAnnotation\x12\x11\n\timageName\x18\x01 \x01(\t\x12)\n\x04rect\x18\x02 \x03(\x0b\x32\x1b.protobuf_annolist.AnnoRect\x12\x10\n\x08imgWidth\x18\x03 \x01(\x05\x12\x11\n\timgHeight\x18\x04 \x01(\x05\"w\n\x08\x41nnoList\x12\x31\n\nannotation\x18\x01 \x03(\x0b\x32\x1d.protobuf_annolist.Annotation\x12\x38\n\x0e\x61ttribute_desc\x18\x02 \x03(\x0b\x32 .protobuf_annolist.AttributeDescB\x0c\x42\nAnnoListPb'
)
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ATTRIBUTE = _descriptor.Descriptor(
name='Attribute',
full_name='protobuf_annolist.Attribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id',
full_name='protobuf_annolist.Attribute.id',
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='val',
full_name='protobuf_annolist.Attribute.val',
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='fval',
full_name='protobuf_annolist.Attribute.fval',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='strval',
full_name='protobuf_annolist.Attribute.strval',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=37,
serialized_end=103,
)
_IDSTRPAIR = _descriptor.Descriptor(
name='IdStrPair',
full_name='protobuf_annolist.IdStrPair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id',
full_name='protobuf_annolist.IdStrPair.id',
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='s',
full_name='protobuf_annolist.IdStrPair.s',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=105,
serialized_end=139,
)
_ATTRIBUTEDESC = _descriptor.Descriptor(
name='AttributeDesc',
full_name='protobuf_annolist.AttributeDesc',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name',
full_name='protobuf_annolist.AttributeDesc.name',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='id',
full_name='protobuf_annolist.AttributeDesc.id',
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='dtype',
full_name='protobuf_annolist.AttributeDesc.dtype',
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='val_to_str',
full_name='protobuf_annolist.AttributeDesc.val_to_str',
index=3,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=141,
serialized_end=247,
)
_ANNORECTATTRIBUTE = _descriptor.Descriptor(
name='AnnoRectAttribute',
full_name='protobuf_annolist.AnnoRectAttribute',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name',
full_name='protobuf_annolist.AnnoRectAttribute.name',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='val',
full_name='protobuf_annolist.AnnoRectAttribute.val',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=249,
serialized_end=295,
)
_ANNORECT = _descriptor.Descriptor(
name='AnnoRect',
full_name='protobuf_annolist.AnnoRect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x1',
full_name='protobuf_annolist.AnnoRect.x1',
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='y1',
full_name='protobuf_annolist.AnnoRect.y1',
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='x2',
full_name='protobuf_annolist.AnnoRect.x2',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='y2',
full_name='protobuf_annolist.AnnoRect.y2',
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='score',
full_name='protobuf_annolist.AnnoRect.score',
index=4,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='id',
full_name='protobuf_annolist.AnnoRect.id',
index=5,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='track_id',
full_name='protobuf_annolist.AnnoRect.track_id',
index=6,
number=11,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='attribute',
full_name='protobuf_annolist.AnnoRect.attribute',
index=7,
number=12,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=298,
serialized_end=450,
)
_ANNOTATION = _descriptor.Descriptor(
name='Annotation',
full_name='protobuf_annolist.Annotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='imageName',
full_name='protobuf_annolist.Annotation.imageName',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='rect',
full_name='protobuf_annolist.Annotation.rect',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='imgWidth',
full_name='protobuf_annolist.Annotation.imgWidth',
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='imgHeight',
full_name='protobuf_annolist.Annotation.imgHeight',
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=452,
serialized_end=563,
)
_ANNOLIST = _descriptor.Descriptor(
name='AnnoList',
full_name='protobuf_annolist.AnnoList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='annotation',
full_name='protobuf_annolist.AnnoList.annotation',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
_descriptor.FieldDescriptor(
name='attribute_desc',
full_name='protobuf_annolist.AnnoList.attribute_desc',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=565,
serialized_end=684,
)
_ATTRIBUTEDESC.fields_by_name['val_to_str'].message_type = _IDSTRPAIR
_ANNORECT.fields_by_name['attribute'].message_type = _ATTRIBUTE
_ANNOTATION.fields_by_name['rect'].message_type = _ANNORECT
_ANNOLIST.fields_by_name['annotation'].message_type = _ANNOTATION
_ANNOLIST.fields_by_name['attribute_desc'].message_type = _ATTRIBUTEDESC
DESCRIPTOR.message_types_by_name['Attribute'] = _ATTRIBUTE
DESCRIPTOR.message_types_by_name['IdStrPair'] = _IDSTRPAIR
DESCRIPTOR.message_types_by_name['AttributeDesc'] = _ATTRIBUTEDESC
DESCRIPTOR.message_types_by_name['AnnoRectAttribute'] = _ANNORECTATTRIBUTE
DESCRIPTOR.message_types_by_name['AnnoRect'] = _ANNORECT
DESCRIPTOR.message_types_by_name['Annotation'] = _ANNOTATION
DESCRIPTOR.message_types_by_name['AnnoList'] = _ANNOLIST
Attribute = _reflection.GeneratedProtocolMessageType(
'Attribute',
(_message.Message,),
dict(
DESCRIPTOR=_ATTRIBUTE,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.Attribute)
)
)
_sym_db.RegisterMessage(Attribute)
IdStrPair = _reflection.GeneratedProtocolMessageType(
'IdStrPair',
(_message.Message,),
dict(
DESCRIPTOR=_IDSTRPAIR,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.IdStrPair)
)
)
_sym_db.RegisterMessage(IdStrPair)
AttributeDesc = _reflection.GeneratedProtocolMessageType(
'AttributeDesc',
(_message.Message,),
dict(
DESCRIPTOR=_ATTRIBUTEDESC,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.AttributeDesc)
)
)
_sym_db.RegisterMessage(AttributeDesc)
AnnoRectAttribute = _reflection.GeneratedProtocolMessageType(
'AnnoRectAttribute',
(_message.Message,),
dict(
DESCRIPTOR=_ANNORECTATTRIBUTE,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.AnnoRectAttribute)
)
)
_sym_db.RegisterMessage(AnnoRectAttribute)
AnnoRect = _reflection.GeneratedProtocolMessageType(
'AnnoRect',
(_message.Message,),
dict(
DESCRIPTOR=_ANNORECT,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.AnnoRect)
)
)
_sym_db.RegisterMessage(AnnoRect)
Annotation = _reflection.GeneratedProtocolMessageType(
'Annotation',
(_message.Message,),
dict(
DESCRIPTOR=_ANNOTATION,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.Annotation)
)
)
_sym_db.RegisterMessage(Annotation)
AnnoList = _reflection.GeneratedProtocolMessageType(
'AnnoList',
(_message.Message,),
dict(
DESCRIPTOR=_ANNOLIST,
__module__='AnnoList_pb2'
# @@protoc_insertion_point(class_scope:protobuf_annolist.AnnoList)
)
)
_sym_db.RegisterMessage(AnnoList)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
descriptor_pb2.FileOptions(), _b('B\nAnnoListPb')
)
# @@protoc_insertion_point(module_scope)
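# --- Illustrative usage (editor's addition, not part of the generated file) ---
# A minimal round-trip sketch for the generated messages above. It assumes a
# protobuf runtime compatible with this (pre-3.x style) generated code; the
# field values are arbitrary.
def _example_annolist_roundtrip():
    anno_list = AnnoList()
    anno = anno_list.annotation.add()          # repeated Annotation field
    anno.imageName = 'img_0001.png'
    rect = anno.rect.add()                     # repeated AnnoRect field
    rect.x1, rect.y1, rect.x2, rect.y2 = 10.0, 20.0, 110.0, 220.0
    rect.score = 0.9
    data = anno_list.SerializeToString()       # binary wire format
    parsed = AnnoList()
    parsed.ParseFromString(data)               # lossless round trip
    assert abs(parsed.annotation[0].rect[0].score - 0.9) < 1e-6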
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/AnnoList_pb2.py |
import os
import sys
import math  # used by loadRPCData and plotLogFPPI below
import string
import matplotlib
matplotlib.use('Agg')
from pylab import *
import numpy as np
class MatPlotter:
fontsize = 15
color = 0
colors = ["r-", "b-", "k-", "c-", "m-", "y-"]
colors += [x + "-" for x in colors]
colors += ["g-", "g--"]
curFigure = []
legendNames = []
fontsizeLegend = 14
legendPlace = 'lower right'
legendborderpad = None
legendlabelsep = None
def __init__(self, fontsize=15):
# self.newFigure()
self.fontsize = fontsize
self.fontsizeLegend = fontsize - 1
def formatLegend(
self,
newFontSize=14,
newPlace='lower right',
borderpad=None,
labelsep=None
):
self.fontsizeLegend = newFontSize
self.legendPlace = newPlace
self.legendborderpad = borderpad
self.legendlabelsep = labelsep
def newFigure(self, plotTitle="", fsize=rcParams['figure.figsize']):
return self.newRPCFigure(plotTitle, fsize)
def newRPCFigure(self, plotTitle="", fsize=rcParams['figure.figsize']):
curFigure = figure(figsize=fsize)
self.title = title(plotTitle, fontsize=self.fontsize)
#subplots_adjust(left=0.085, right=0.975, top=0.975, bottom=0.085)
subplots_adjust(right=0.975, top=0.975)
#axis('equal')
axis([0, 1, 0, 1])
xticklocs, xticklabels = xticks(arange(0, 1.01, 0.1))
setp(xticklabels, size=self.fontsize)
yticklocs, yticklabels = yticks(arange(0, 1.01, 0.1))
setp(yticklabels, size=self.fontsize)
self.xlabel = xlabel("1-precision")
self.xlabel.set_size(self.fontsize + 2)
self.ylabel = ylabel("recall")
self.ylabel.set_size(self.fontsize + 4)
grid()
hold(True)
def newFPPIFigure(self, plotTitle="", fsize=rcParams['figure.figsize']):
curFigure = figure(figsize=fsize)
self.title = title(plotTitle, fontsize=self.fontsize)
subplots_adjust(left=0.085, right=0.975, top=0.975, bottom=0.085)
#axis('equal')
axis([0, 100, 0, 1])
xticklocs, xticklabels = xticks(arange(0, 100.01, 0.5))
setp(xticklabels, size=self.fontsize)
yticklocs, yticklabels = yticks(arange(0, 1.01, 0.1))
setp(yticklabels, size=self.fontsize)
self.xlabel = xlabel("false positives per image")
self.xlabel.set_size(self.fontsize + 2)
self.ylabel = ylabel("recall")
self.ylabel.set_size(self.fontsize + 4)
grid()
hold(True)
def newFreqFigure(
self, plotTitle="", maxX=10, maxY=10, fsize=rcParams['figure.figsize']
):
curFigure = figure(figsize=fsize)
self.title = title(plotTitle, fontsize=self.fontsize)
subplots_adjust(left=0.085, right=0.975, top=0.975, bottom=0.1)
#axis('equal')
axis([0, maxX, 0, maxY])
xticklocs, xticklabels = xticks(
arange(0, maxX + 0.01, maxX * 1.0 / 10)
)
setp(xticklabels, size=self.fontsize)
yticklocs, yticklabels = yticks(
arange(0, maxY + 0.01, maxY * 1.0 / 10)
)
setp(yticklabels, size=self.fontsize)
self.xlabel = xlabel("False positive / ground truth rect")
self.xlabel.set_size(self.fontsize + 2)
self.ylabel = ylabel("True positives / ground truth rect")
self.ylabel.set_size(self.fontsize + 4)
grid()
hold(True)
def newFPPWFigure(self, plotTitle="", fsize=rcParams['figure.figsize']):
curFigure = figure(figsize=fsize)
self.title = title(plotTitle, fontsize=self.fontsize)
subplots_adjust(left=0.085, right=0.975, top=0.975, bottom=0.085)
self.xlabel = xlabel("false positive per windows (FPPW)")
self.xlabel.set_size(self.fontsize + 2)
self.ylabel = ylabel("miss rate")
self.ylabel.set_size(self.fontsize + 4)
grid()
hold(True)
def newLogFPPIFigure(self, plotTitle="", fsize=rcParams['figure.figsize']):
curFigure = figure(figsize=fsize)
self.title = title(plotTitle, fontsize=self.fontsize)
subplots_adjust(left=0.085, right=0.975, top=0.975, bottom=0.1)
#axis('equal')
self.xlabel = xlabel("false positives per image")
self.xlabel.set_size(self.fontsize + 2)
self.ylabel = ylabel("miss rate")
self.ylabel.set_size(self.fontsize + 4)
grid()
hold(True)
def loadRPCData(self, fname):
self.filename = fname
self.prec = []
self.rec = []
self.score = []
self.fppi = []
file = open(fname)
precScores = []
for i in range(1, 10, 1):
precScores.append(100 - i * 10)
fppiScores = []
for i in range(0, 500, 5):
fppiScores.append(i * 1.0 / 100.0)
precinfo = []
fppiinfo = []
eerinfo = []
logAvInfo = []
logAvMR = []
self.lamr = 0
self.eer = None
firstLine = True
leadingZeroCount = 0
for line in file.readlines():
vals = line.split()
#vals=line.split(" ")
#for val in vals:
# if val=="":
# vals.remove(val)
self.prec.append(1 - float(vals[0]))
self.rec.append(float(vals[1]))
self.score.append(float(vals[2]))
if (len(vals) > 3):
self.fppi.append(float(vals[3]))
if firstLine and not float(vals[3]) == 0:
firstLine = False
lamrcount = 1
self.lamr = 1 - float(vals[1])
lowest_fppi = math.ceil(
math.log(float(vals[3])) / math.log(10) * 10
)
print "lowest_fppi: ", lowest_fppi
# MA: temporarily commented out
# for i in range(lowest_fppi, 1, 1):
# logAvMR.append(10** (i * 1.0 / 10))
#self.score.append(float(vals[2][:-1]))
#print 1-self.prec[-1], self.rec[-1], self.score[-1]
if (len(self.prec) > 1):
diff = (1 - self.prec[-1] - self.rec[-1]
) * (1 - self.prec[-2] - self.rec[-2])
if (diff < 0):
eerinfo.append(
"EER between: %.03f and %.03f\tScore:%f" %
(self.rec[-1], self.rec[-2], self.score[-1])
)
self.eer = (self.rec[-1] + self.rec[-2]) * 0.5
if (diff == 0 and 1 - self.prec[-1] - self.rec[-1] == 0):
eerinfo.append(
"EER: %.03f\tScore:%f" %
(self.rec[-1], self.score[-1])
)
self.eer = self.rec[-1]
#Remove already passed precision
if (len(precScores) > 0 and (float(vals[0])) < precScores[0] / 100.0):
precinfo.append(
"%d percent precision score: %f, recall: %.03f" %
(precScores[0], float(vals[2]), float(vals[1]))
)
while (
len(precScores) > 0 and
precScores[0] / 100.0 > float(vals[0])
):
precScores.pop(0)
#Remove already passed precision
if (len(vals) > 3):
if (len(fppiScores) > 0 and (float(vals[3])) > fppiScores[0]):
fppiinfo.append(
"%f fppi score: %f, recall: %.03f" %
(fppiScores[0], float(vals[2]), float(vals[1]))
)
while (
len(fppiScores) > 0 and fppiScores[0] < float(vals[3])
):
fppiScores.pop(0)
if (len(logAvMR) > 0 and (float(vals[3])) > logAvMR[0]):
while (len(logAvMR) > 0 and logAvMR[0] < float(vals[3])):
logAvInfo.append(
"%f fppi, miss rate: %.03f, score: %f" %
(logAvMR[0], 1 - float(vals[1]), float(vals[2]))
)
self.lamr += 1 - float(vals[1])
lamrcount += 1
logAvMR.pop(0)
lastMR = 1 - float(vals[1])
if (len(vals) > 3):
for i in logAvMR:
logAvInfo.append(
"%f fppi, miss rate: %.03f, extended" % (i, lastMR)
)
self.lamr += lastMR
lamrcount += 1
for i in precinfo:
print(i)
print()
for i in fppiinfo:
print(i)
print()
for i in eerinfo:
print(i)
print()
print("Recall at first false positive: %.03f" % self.rec[0])
if (len(vals) > 3):
print()
for i in logAvInfo:
print(i)
self.lamr = self.lamr * 1.0 / lamrcount
print(
"Log average miss rate in [10^%.01f, 10^0]: %.03f" %
(lowest_fppi / 10.0, self.lamr)
)
print()
print()
file.close()
def loadFreqData(self, fname):
self.filename = fname
self.prec = []
self.rec = []
self.score = []
file = open(fname)
for line in file.readlines():
vals = line.split()
self.prec.append(float(vals[0]))
self.rec.append(float(vals[1]))
self.score.append(float(vals[2]))
file.close()
def loadFPPWData(self, fname):
self.loadFreqData(fname)
def finishPlot(self, axlimits=[0, 1.0, 0, 1.0]):
# MA:
#self.legend = legend(self.legendNames, self.legendPlace, pad = self.legendborderpad, labelsep = self.legendlabelsep)
self.legend = legend(self.legendNames, self.legendPlace)
lstrings = self.legend.get_texts()
setp(lstrings, fontsize=self.fontsizeLegend)
#line= plot( [1 - axlimits[0], 0], [axlimits[3], 1 - axlimits[3] ] , 'k')
line = plot([1, 0], [0, 1], 'k')
def finishFreqPlot(self):
self.legend = legend(
self.legendNames,
self.legendPlace,
pad=self.legendborderpad,
labelsep=self.legendlabelsep
)
lstrings = self.legend.get_texts()
setp(lstrings, fontsize=self.fontsizeLegend)
def show(self, plotEER=True, axlimits=[0, 1.0, 0, 1.0]):
if (plotEER):
self.finishPlot(axlimits)
axis(axlimits)
else:
self.finishFreqPlot()
show()
def saveCurrentFigure(self, plotEER, filename, axlimits=[0, 1.0, 0, 1.0]):
if (plotEER):
self.finishPlot(axlimits)
axis(axlimits)
else:
self.finishFreqPlot()
print "Saving: " + filename
savefig(filename)
def plotRFP(self, numImages, fname, line="r-"):
print('NOT YET IMPLEMENTED')
def plotRPC(
self,
fname,
descr="line",
style="-1",
axlimits=[0, 1.0, 0, 1.0],
linewidth=2,
dashstyle=[],
addEER=False
):
self.loadRPCData(fname)
#axis(axlimits);
if (style == "-1"):
if dashstyle != []:
line = plot(
self.prec,
self.rec,
self.colors[self.color],
dashes=dashstyle
)
else:
line = plot(self.prec, self.rec, self.colors[self.color])
self.color = self.color + 1
self.color = self.color % len(self.colors)
else:
if dashstyle != []:
line = plot(self.prec, self.rec, style, dashes=dashstyle)
else:
line = plot(self.prec, self.rec, style)
axis(axlimits)
if addEER and self.eer != None:
descr += " (%.01f%%)" % (self.eer * 100)
setp(line, 'linewidth', linewidth)
self.legendNames = self.legendNames + [descr]
def plotFPPI(
self,
fname,
descr="line",
style="-1",
axlimits=[0, 2, 0, 1],
linewidth=2,
dashstyle=[]
):
self.loadRPCData(fname)
if (style == "-1"):
if dashstyle != []:
line = plot(
self.fppi,
self.rec,
self.colors[self.color],
dashes=dashstyle
)
else:
line = plot(self.fppi, self.rec, self.colors[self.color])
self.color = self.color + 1
self.color = self.color % len(self.colors)
else:
if dashstyle != []:
line = plot(self.fppi, self.rec, style, dashes=dashstyle)
else:
line = plot(self.fppi, self.rec, style)
axis(axlimits)
setp(line, 'linewidth', linewidth)
self.legendNames = self.legendNames + [descr]
def plotFreq(
self, fname, descr="line", style="-1", linewidth=2, dashstyle=[]
):
self.loadFreqData(fname)
if (style == "-1"):
if dashstyle != []:
line = plot(
self.prec,
self.rec,
self.colors[self.color],
dashes=dashstyle
)
else:
line = plot(self.prec, self.rec, self.colors[self.color])
self.color = self.color + 1
self.color = self.color % len(self.colors)
else:
if dashstyle != []:
line = plot(self.prec, self.rec, style, dashes=dashstyle)
else:
line = plot(self.prec, self.rec, style)
setp(line, 'linewidth', linewidth)
self.legendNames = self.legendNames + [descr]
def plotFPPW(
self,
fname,
descr="line",
style="-1",
axlimits=[5e-6, 1e0, 1e-2, 0.5],
linewidth=2,
dashstyle=[]
):
self.loadFPPWData(fname)
if (style == "-1"):
if dashstyle != []:
line = loglog(
self.prec,
self.rec,
self.colors[self.color],
dashes=dashstyle
)
else:
line = loglog(self.prec, self.rec, self.colors[self.color])
self.color = self.color + 1
self.color = self.color % len(self.colors)
else:
if dashstyle != []:
line = loglog(self.prec, self.rec, style, dashes=dashstyle)
else:
line = loglog(self.prec, self.rec, style)
xticklocs, xticklabels = xticks([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0])
setp(xticklabels, size=self.fontsize)
yticklocs, yticklabels = yticks(
array(
[
0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.2, 0.3, 0.4, 0.5
]
), (
"0.01", "0.02", "0.03", "0.04", "0.05", "0.06", "0.07", "0.08",
"0.09", "0.1", "0.2", "0.3", "0.4", "0.5"
)
)
setp(yticklabels, size=self.fontsize)
axis(axlimits)
gca().yaxis.grid(True, 'minor')
setp(line, 'linewidth', linewidth)
self.legendNames = self.legendNames + [descr]
def plotLogFPPI(
self,
fname,
descr="line",
style="-1",
axlimits=[5e-3, 1e1, 1e-1, 1],
linewidth=2,
dashstyle=[],
addlamr=False
):
self.loadRPCData(fname)
if (style == "-1"):
if dashstyle != []:
line = loglog(
self.fppi, [1 - x for x in self.rec],
self.colors[self.color],
dashes=dashstyle
)
else:
line = loglog(
self.fppi, [1 - x
for x in self.rec], self.colors[self.color]
)
self.color = (self.color + 1) % len(self.colors)
else:
if dashstyle != []:
line = loglog(
self.fppi, [1 - x for x in self.rec],
style,
dashes=dashstyle
)
else:
line = loglog(self.fppi, [1 - x for x in self.rec], style)
gca().yaxis.grid(True, 'minor')
m = min(self.fppi)
lax = axlimits[0]
for i in self.fppi:
if (i != m):
lax = math.floor(log(i) / math.log(10))
leftlabel = math.pow(10, lax)
break
m = max(self.fppi)
rightlabel = math.pow(10, math.ceil(log(m) / math.log(10))) + 0.01
k = leftlabel
ticks = [k]
while k < rightlabel:
k = k * 10
ticks.append(k)
xticklocs, xticklabels = xticks(ticks)
setp(xticklabels, size=self.fontsize)
yticklocs, yticklabels = yticks(
arange(0.1, 1.01, 0.1), (
"0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9",
"1.0"
)
)
setp(yticklabels, size=self.fontsize)
axlimits[0] = lax
axis(axlimits)
setp(line, 'linewidth', linewidth)
if addlamr:
descr += " (%.01f%%)" % (self.lamr * 100)
self.legendNames = self.legendNames + [descr]
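# --- Illustrative usage (editor's addition) ---
# A minimal sketch of plotting one recall/precision curve. It assumes a
# whitespace-separated results file with columns "precision recall score"
# (plus an optional fppi column), and an old matplotlib that still provides
# pylab's hold() (this module predates matplotlib 3.0). Paths are hypothetical.
def _example_plot_rpc():
    with open('/tmp/example_rpc.txt', 'w') as f:
        f.write('0.95 0.20 5.0\n0.70 0.50 3.0\n0.30 0.80 1.0\n')
    plotter = MatPlotter(fontsize=12)
    plotter.newFigure('example detector')
    plotter.plotRPC('/tmp/example_rpc.txt', descr='demo run')
    plotter.saveCurrentFigure(True, '/tmp/example_rpc.png')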
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/MatPlotter.py |
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/__init__.py |
#!/usr/bin/env python
import sys
import os
import random
import re
from AnnotationLib import *
from MatPlotter import *
from optparse import OptionParser
from copy import deepcopy
from math import sqrt
def main(argv):
parser = OptionParser(usage="usage: %prog [options] <datafile> [...]")
parser.add_option(
"-o",
"--output-file",
action="store",
dest="output",
type="str",
help="outfile. mandatory"
)
parser.add_option(
"--fppw",
action="store_true",
dest="fppw",
help="False Positives Per Window"
)
parser.add_option("--colors", action="store", dest="colors", help="colors")
parser.add_option(
"--fppi",
action="store_true",
dest="fppi",
help="False Positives Per Image"
)
parser.add_option(
"--lfppi",
action="store_true",
dest="lfppi",
help="False Positives Per Image(log)"
)
parser.add_option(
"-c",
"--components",
action="store",
dest="ncomponents",
type="int",
help="show n trailing components of the part",
default=3
)
parser.add_option(
"--cut-trailing",
action="store",
dest="cutcomponents",
type="int",
help=
"cut n trailing components of the part (applied after --components)",
default=-1
)
parser.add_option(
"-t", "--title", action="store", dest="title", type="str", default=""
)
parser.add_option(
"-f",
"--fontsize",
action="store",
dest="fontsize",
type="int",
default=12
)
parser.add_option(
"-l",
"--legend'",
action="store",
dest="legend",
type="string",
default="lr"
)
(options, args) = parser.parse_args()
plotter = MatPlotter(options.fontsize)
position = "lower right"
if (options.legend == "ur"):
position = "upper right"
if (options.legend == "ul"):
position = "upper left"
if (options.legend == "ll"):
position = "lower left"
plotter.formatLegend(options.fontsize, newPlace=position)
title = options.title
colors = None
if (options.colors):
colors = options.colors.split()
if (options.fppw):
plotter.newFPPWFigure(title)
elif (options.lfppi):
plotter.newLogFPPIFigure(title)
elif (options.fppi):
plotter.newFPPIFigure(title)
else:
plotter.newFigure(title)
for i, filename in enumerate(args):
if (os.path.isdir(filename)):
filename = os.path.join(filename, "rpc", "result-minh-48")
displayname = filename
if (options.ncomponents > 0):
suffix = None
for idx in range(options.ncomponents):
displayname, last = os.path.split(displayname)
if (suffix):
suffix = os.path.join(last, suffix)
else:
suffix = last
displayname = suffix
if (options.cutcomponents > 0):
for idx in range(options.cutcomponents):
displayname, last = os.path.split(displayname)
# plusidx = displayname.index("+")
# displayname = displayname[plusidx:]
print "Plotting: " + displayname
if (options.fppw):
plotter.plotFPPW(filename, displayname)
elif (options.lfppi):
if colors:
plotter.plotLogFPPI(filename, displayname, colors[i])
else:
plotter.plotLogFPPI(filename, displayname)
elif (options.fppi):
plotter.plotFPPI(filename, displayname)
else:
plotter.plotRPC(filename, displayname)
plotLine = not (options.fppw or options.lfppi or options.fppi)
if (options.output is None):
plotter.show(plotLine)
else:
plotter.saveCurrentFigure(plotLine, options.output)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
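# --- Illustrative invocation (editor's addition) ---
# The script is meant to be run from the command line; each positional
# argument is a results file (or a run directory containing
# rpc/result-minh-48). File names below are hypothetical:
#
#   python plotSimple.py -o curves.png -t "Detector comparison" run1.txt run2.txt
#   python plotSimple.py --lfppi -o lamr.png run1.txt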
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/plotSimple.py |
import os
from math import sqrt
import gzip
import json
import bz2
import numpy as np
try:
from collections.abc import MutableSequence  # Python 3.3+
except ImportError:
from collections import MutableSequence  # Python 2 fallback
try:
from . import AnnoList_pb2  # needed by AnnoList.add_attribute
except ImportError:
AnnoList_pb2 = None  # protobuf runtime not available
from . import PalLib
import xml.dom.minidom
xml_dom_ext_available = False
try:
import xml.dom.ext
xml_dom_ext_available = True
except ImportError:
pass
################################################
#
# TODO: check distance function
#
################################################
def cmp(a, b):
# Python 3 removed the builtin cmp(); this restores three-way comparison
return (a > b) - (a < b)
def cmpAnnoRectsByScore(r1, r2):
return cmp(r1.score, r2.score)
def cmpAnnoRectsByScoreDescending(r1, r2):
return (-1) * cmp(r1.score, r2.score)
def cmpDetAnnoRectsByScore(r1, r2):
return cmp(r1.rect.score, r2.rect.score)
def suffixMatch(fn1, fn2):
l1 = len(fn1)
l2 = len(fn2)
if fn1[-l2:] == fn2:
return True
if fn2[-l1:] == fn1:
return True
return False
class AnnoList(MutableSequence):
"""Define a list format, which I can customize"""
TYPE_INT32 = 5
TYPE_FLOAT = 2
TYPE_STRING = 9
def __init__(self, data=None):
super(AnnoList, self).__init__()
self.attribute_desc = {}
self.attribute_val_to_str = {}
if not (data is None):
self._list = list(data)
else:
self._list = list()
def add_attribute(self, name, dtype):
_adesc = AnnoList_pb2.AttributeDesc()
_adesc.name = name
if self.attribute_desc:
_adesc.id = max(
(self.attribute_desc[d].id for d in self.attribute_desc)
) + 1
else:
_adesc.id = 0
if dtype == int:
_adesc.dtype = AnnoList.TYPE_INT32
elif dtype == float or dtype == np.float32:
_adesc.dtype = AnnoList.TYPE_FLOAT
elif dtype == str:
_adesc.dtype = AnnoList.TYPE_STRING
else:
print("unknown attribute type: ", dtype)
assert (False)
#print "adding attribute: {}, id: {}, type: {}".format(_adesc.name, _adesc.id, _adesc.dtype);
self.attribute_desc[name] = _adesc
def add_attribute_val(self, aname, vname, val):
# add attribute before adding string corresponding to integer value
assert (aname in self.attribute_desc)
# check and add if new
if all(
(
val_desc.id != val
for val_desc in self.attribute_desc[aname].val_to_str
)
):
val_desc = self.attribute_desc[aname].val_to_str.add()
val_desc.id = val
val_desc.s = vname
# also add to map for quick access
if not aname in self.attribute_val_to_str:
self.attribute_val_to_str[aname] = {}
assert (not val in self.attribute_val_to_str[aname])
self.attribute_val_to_str[aname][val] = vname
def attribute_get_value_str(self, aname, val):
if aname in self.attribute_val_to_str and val in self.attribute_val_to_str[
aname
]:
return self.attribute_val_to_str[aname][val]
else:
return str(val)
def save(self, fname):
save(fname, self)
#MA: list interface
def __len__(self):
return len(self._list)
def __getitem__(self, ii):
if isinstance(ii, slice):
res = AnnoList()
res.attribute_desc = self.attribute_desc
res._list = self._list[ii]
return res
else:
return self._list[ii]
def __delitem__(self, ii):
del self._list[ii]
def __setitem__(self, ii, val):
self._list[ii] = val
return self._list[ii]
def __str__(self):
return self.__repr__()
def __repr__(self):
return """<AnnoList %s>""" % self._list
def insert(self, ii, val):
self._list.insert(ii, val)
def append(self, val):
list_idx = len(self._list)
self.insert(list_idx, val)
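# --- Illustrative usage (editor's addition) ---
# AnnoList behaves like a plain list of Annotation objects; the custom
# __getitem__ above makes slicing return another AnnoList that shares the
# attribute metadata instead of a bare list.
def _example_annolist_slicing():
    al = AnnoList([Annotation(), Annotation(), Annotation()])
    head = al[:2]
    assert isinstance(head, AnnoList) and len(head) == 2
    assert head.attribute_desc is al.attribute_desc  # metadata carried over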
def is_compatible_attr_type(protobuf_type, attr_type):
if protobuf_type == AnnoList.TYPE_INT32:
return (attr_type == int)
elif protobuf_type == AnnoList.TYPE_FLOAT:
return (attr_type == float or attr_type == np.float32)
elif protobuf_type == AnnoList.TYPE_STRING:
return (attr_type == str)
else:
assert (False)
def protobuf_type_to_python(protobuf_type):
if protobuf_type == AnnoList.TYPE_INT32:
return int
elif protobuf_type == AnnoList.TYPE_FLOAT:
return float
elif protobuf_type == AnnoList.TYPE_STRING:
return str
else:
assert (False)
class AnnoPoint(object):
def __init__(self, x=None, y=None, id=None):
self.x = x
self.y = y
self.id = id
class AnnoRect(object):
def __init__(self, x1=-1, y1=-1, x2=-1, y2=-1):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.score = -1.0
self.scale = -1.0
self.articulations = []
self.viewpoints = []
self.d3 = []
self.silhouetteID = -1
self.classID = -1
self.track_id = -1
self.point = []
self.at = {}
def width(self):
return abs(self.x2 - self.x1)
def height(self):
return abs(self.y2 - self.y1)
def centerX(self):
return (self.x1 + self.x2) / 2.0
def centerY(self):
return (self.y1 + self.y2) / 2.0
def left(self):
return min(self.x1, self.x2)
def right(self):
return max(self.x1, self.x2)
def top(self):
return min(self.y1, self.y2)
def bottom(self):
return max(self.y1, self.y2)
def forceAspectRatio(self, ratio, KeepHeight=False, KeepWidth=False):
"""force the Aspect ratio"""
if KeepWidth or (
(not KeepHeight) and self.width() * 1.0 / self.height() > ratio
):
# extend height
newHeight = self.width() * 1.0 / ratio
self.y1 = (self.centerY() - newHeight / 2.0)
self.y2 = (self.y1 + newHeight)
else:
# extend width
newWidth = self.height() * ratio
self.x1 = (self.centerX() - newWidth / 2.0)
self.x2 = (self.x1 + newWidth)
def clipToImage(self, min_x, max_x, min_y, max_y):
self.x1 = max(min_x, self.x1)
self.x2 = max(min_x, self.x2)
self.y1 = max(min_y, self.y1)
self.y2 = max(min_y, self.y2)
self.x1 = min(max_x, self.x1)
self.x2 = min(max_x, self.x2)
self.y1 = min(max_y, self.y1)
self.y2 = min(max_y, self.y2)
def printContent(self):
print("Coords: ", self.x1, self.y1, self.x2, self.y2)
print("Score: ", self.score)
print("Articulations: ", self.articulations)
print("Viewpoints: ", self.viewpoints)
print("Silhouette: ", self.silhouetteID)
def ascii(self):
r = "(" + str(self.x1) + ", " + str(self.y1) + ", " + str(
self.x2
) + ", " + str(self.y2) + ")"
if (self.score != -1):
r = r + ":" + str(self.score)
if (self.silhouetteID != -1):
r = r + "/" + str(self.silhouetteID)
return r
def writeIDL(self, file):
file.write(
" (" + str(self.x1) + ", " + str(self.y1) + ", " + str(self.x2) +
", " + str(self.y2) + ")"
)
if (self.score != -1):
file.write(":" + str(self.score))
if (self.silhouetteID != -1):
file.write("/" + str(self.silhouetteID))
def writeJSON(self):
jdoc = {"x1": self.x1, "x2": self.x2, "y1": self.y1, "y2": self.y2}
if (self.score != -1):
jdoc["score"] = self.score
return jdoc
def sortCoords(self):
if (self.x1 > self.x2):
self.x1, self.x2 = self.x2, self.x1
if (self.y1 > self.y2):
self.y1, self.y2 = self.y2, self.y1
def rescale(self, factor):
self.x1 = (self.x1 * float(factor))
self.y1 = (self.y1 * float(factor))
self.x2 = (self.x2 * float(factor))
self.y2 = (self.y2 * float(factor))
def resize(self, factor, factor_y=None):
w = self.width()
h = self.height()
if factor_y is None:
factor_y = factor
centerX = float(self.x1 + self.x2) / 2.0
centerY = float(self.y1 + self.y2) / 2.0
self.x1 = (centerX - (w / 2.0) * factor)
self.y1 = (centerY - (h / 2.0) * factor_y)
self.x2 = (centerX + (w / 2.0) * factor)
self.y2 = (centerY + (h / 2.0) * factor_y)
def intersection(self, other):
self.sortCoords()
other.sortCoords()
if (self.x1 >= other.x2):
return (0, 0)
if (self.x2 <= other.x1):
return (0, 0)
if (self.y1 >= other.y2):
return (0, 0)
if (self.y2 <= other.y1):
return (0, 0)
l = max(self.x1, other.x1)
t = max(self.y1, other.y1)
r = min(self.x2, other.x2)
b = min(self.y2, other.y2)
return (r - l, b - t)
#Alternate implementation
#nWidth = self.x2 - self.x1
#nHeight = self.y2 - self.y1
#iWidth = max(0,min(max(0,other.x2-self.x1),nWidth )-max(0,other.x1-self.x1))
#iHeight = max(0,min(max(0,other.y2-self.y1),nHeight)-max(0,other.y1-self.y1))
#return (iWidth, iHeight)
def cover(self, other):
nWidth = self.width()
nHeight = self.height()
iWidth, iHeight = self.intersection(other)
return float(iWidth * iHeight) / float(nWidth * nHeight)
def overlap_pascal(self, other):
self.sortCoords()
other.sortCoords()
nWidth = self.x2 - self.x1
nHeight = self.y2 - self.y1
iWidth, iHeight = self.intersection(other)
interSection = iWidth * iHeight
union = self.width() * self.height() + other.width() * other.height(
) - interSection
overlap = interSection * 1.0 / union
return overlap
def isMatchingPascal(self, other, minOverlap):
overlap = self.overlap_pascal(other)
if (overlap >= minOverlap and (self.classID == -1 or other.classID == -1 or self.classID == other.classID)):
return 1
else:
return 0
def distance(self, other, aspectRatio=-1, fixWH='fixheight'):
if (aspectRatio != -1):
if (fixWH == 'fixwidth'):
dWidth = float(self.x2 - self.x1)
dHeight = dWidth / aspectRatio
elif (fixWH == 'fixheight'):
dHeight = float(self.y2 - self.y1)
dWidth = dHeight * aspectRatio
else:
dWidth = float(self.x2 - self.x1)
dHeight = float(self.y2 - self.y1)
xdist = (self.x1 + self.x2 - other.x1 - other.x2) / dWidth
ydist = (self.y1 + self.y2 - other.y1 - other.y2) / dHeight
return sqrt(xdist * xdist + ydist * ydist)
def isMatchingStd(
self,
other,
coverThresh,
overlapThresh,
distThresh,
aspectRatio=-1,
fixWH=-1
):
cover = other.cover(self)
overlap = self.cover(other)
dist = self.distance(other, aspectRatio, fixWH)
#if(self.width() == 24 ):
#print cover, " ", overlap, " ", dist
#print coverThresh, overlapThresh, distThresh
#print (cover>=coverThresh and overlap>=overlapThresh and dist<=distThresh)
if (cover>=coverThresh and overlap>=overlapThresh and dist<=distThresh and self.classID == other.classID):
return 1
else:
return 0
def isMatching(
self,
other,
style,
coverThresh,
overlapThresh,
distThresh,
minOverlap,
aspectRatio=-1,
fixWH=-1
):
#choose matching style
if (style == 0):
return self.isMatchingStd(
other,
coverThresh,
overlapThresh,
distThresh,
aspectRatio=-1,
fixWH=-1
)
if (style == 1):
return self.isMatchingPascal(other, minOverlap)
def addToXML(self, node, doc): # no Silhouette yet
rect_el = doc.createElement("annorect")
for item in "x1 y1 x2 y2 score scale track_id".split():
coord_el = doc.createElement(item)
coord_val = doc.createTextNode(str(self.__getattribute__(item)))
coord_el.appendChild(coord_val)
rect_el.appendChild(coord_el)
articulation_el = doc.createElement("articulation")
for articulation in self.articulations:
id_el = doc.createElement("id")
id_val = doc.createTextNode(str(articulation))
id_el.appendChild(id_val)
articulation_el.appendChild(id_el)
if (len(self.articulations) > 0):
rect_el.appendChild(articulation_el)
viewpoint_el = doc.createElement("viewpoint")
for viewpoint in self.viewpoints:
id_el = doc.createElement("id")
id_val = doc.createTextNode(str(viewpoint))
id_el.appendChild(id_val)
viewpoint_el.appendChild(id_el)
if (len(self.viewpoints) > 0):
rect_el.appendChild(viewpoint_el)
d3_el = doc.createElement("D3")
for d in self.d3:
id_el = doc.createElement("id")
id_val = doc.createTextNode(str(d))
id_el.appendChild(id_val)
d3_el.appendChild(id_el)
if (len(self.d3) > 0):
rect_el.appendChild(d3_el)
if self.silhouetteID != -1:
silhouette_el = doc.createElement("silhouette")
id_el = doc.createElement("id")
id_val = doc.createTextNode(str(self.silhouetteID))
id_el.appendChild(id_val)
silhouette_el.appendChild(id_el)
rect_el.appendChild(silhouette_el)
if self.classID != -1:
class_el = doc.createElement("classID")
class_val = doc.createTextNode(str(self.classID))
class_el.appendChild(class_val)
rect_el.appendChild(class_el)
if len(self.point) > 0:
annopoints_el = doc.createElement("annopoints")
for p in self.point:
point_el = doc.createElement("point")
point_id_el = doc.createElement("id")
point_id_val = doc.createTextNode(str(p.id))
point_id_el.appendChild(point_id_val)
point_el.appendChild(point_id_el)
point_x_el = doc.createElement("x")
point_x_val = doc.createTextNode(str(p.x))
point_x_el.appendChild(point_x_val)
point_el.appendChild(point_x_el)
point_y_el = doc.createElement("y")
point_y_val = doc.createTextNode(str(p.y))
point_y_el.appendChild(point_y_val)
point_el.appendChild(point_y_el)
annopoints_el.appendChild(point_el)
rect_el.appendChild(annopoints_el)
node.appendChild(rect_el)
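# --- Illustrative usage (editor's addition) ---
# A minimal sketch of the geometry helpers on AnnoRect; the coordinates are
# arbitrary. overlap_pascal computes intersection-over-union.
def _example_rect_overlap():
    a = AnnoRect(0, 0, 100, 100)
    b = AnnoRect(50, 50, 150, 150)
    assert a.intersection(b) == (50, 50)
    iou = a.overlap_pascal(b)  # 2500 / (10000 + 10000 - 2500)
    assert abs(iou - 2500.0 / 17500.0) < 1e-9
    assert a.isMatchingPascal(b, minOverlap=0.5) == 0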
class Annotation(object):
def __init__(self):
self.imageName = ""
self.imagePath = ""
self.rects = []
self.frameNr = -1
def clone_empty(self):
new = Annotation()
new.imageName = self.imageName
new.imagePath = self.imagePath
new.frameNr = self.frameNr
new.rects = []
return new
def filename(self):
return os.path.join(self.imagePath, self.imageName)
def printContent(self):
print("Name: ", self.imageName)
for rect in self.rects:
rect.printContent()
def writeIDL(self, file):
if (self.frameNr == -1):
file.write(
"\"" + os.path.join(self.imagePath, self.imageName) + "\""
)
else:
file.write(
"\"" + os.path.join(self.imagePath, self.imageName) +
"@%d\"" % self.frameNr
)
if (len(self.rects) > 0):
file.write(":")
i = 0
for rect in self.rects:
rect.writeIDL(file)
if (i + 1 < len(self.rects)):
file.write(",")
i += 1
def writeJSON(self):
jdoc = {}
jdoc['image_path'] = os.path.join(self.imagePath, self.imageName)
jdoc['rects'] = []
for rect in self.rects:
jdoc['rects'].append(rect.writeJSON())
return jdoc
def addToXML(self, node, doc): # no frame# yet
annotation_el = doc.createElement("annotation")
img_el = doc.createElement("image")
name_el = doc.createElement("name")
name_val = doc.createTextNode(
os.path.join(self.imagePath, self.imageName)
)
name_el.appendChild(name_val)
img_el.appendChild(name_el)
if (self.frameNr != -1):
frame_el = doc.createElement("frameNr")
frame_val = doc.createTextNode(str(self.frameNr))
frame_el.appendChild(frame_val)
img_el.appendChild(frame_el)
annotation_el.appendChild(img_el)
for rect in self.rects:
rect.addToXML(annotation_el, doc)
node.appendChild(annotation_el)
def sortByScore(self, dir="ascending"):
if (dir == "descending"):
self.rects.sort(cmpAnnoRectsByScoreDescending)
else:
self.rects.sort(cmpAnnoRectsByScore)
def __getitem__(self, index):
return self.rects[index]
class detAnnoRect:
def __init__(self):
self.imageName = ""
self.frameNr = -1
self.rect = AnnoRect()
self.imageIndex = -1
self.boxIndex = -1
#####################################################################
### Parsing
def parseTii(filename):
# MA: this must be some really old code
assert (False)
annotations = []
#--- parse xml ---#
doc = xml.dom.minidom.parse(filename)
#--- get tags ---#
for file in doc.getElementsByTagName("file"):
anno = Annotation()
for filename in file.getElementsByTagName("filename"):
aNode = filename.getAttributeNode("Src")
anno.imageName = aNode.firstChild.data[:-4] + ".png"
for objects in file.getElementsByTagName("objects"):
for vehicle in objects.getElementsByTagName("vehicle"):
aNode = vehicle.getAttributeNode("Type")
type = aNode.firstChild.data
if (type == "pedestrian"):
rect = AnnoRect()
aNode = vehicle.getAttributeNode("FR")
frontrear = aNode.firstChild.data
aNode = vehicle.getAttributeNode("SD")
side = aNode.firstChild.data
if (frontrear == "1"):
orientation = "FR"
elif (side == "1"):
orientation = "SD"
aNode = vehicle.getAttributeNode(
orientation + "_TopLeft_X"
)
rect.x1 = float(aNode.firstChild.data)
aNode = vehicle.getAttributeNode(
orientation + "_TopLeft_Y"
)
rect.y1 = float(aNode.firstChild.data)
aNode = vehicle.getAttributeNode(
orientation + "_BottomRight_X"
)
rect.x2 = float(aNode.firstChild.data)
aNode = vehicle.getAttributeNode(
orientation + "_BottomRight_Y"
)
rect.y2 = float(aNode.firstChild.data)
print(
"pedestrian:", anno.imageName, rect.x1, rect.y1,
rect.x2, rect.y2
)
anno.rects.append(rect)
annotations.append(anno)
return annotations
def parseXML(filename):
filename = os.path.realpath(filename)
name, ext = os.path.splitext(filename)
annotations = AnnoList([])
if (ext == ".al"):
file = open(filename, 'r')
lines = file.read()
file.close()
if (ext == ".gz"):
zfile = gzip.GzipFile(filename)
lines = zfile.read()
zfile.close()
if (ext == ".bz2"):
bfile = bz2.BZ2File(filename)
lines = bfile.read()
bfile.close()
#--- parse xml ---#
doc = xml.dom.minidom.parseString(lines)
#--- get tags ---#
for annotation in doc.getElementsByTagName("annotation"):
anno = Annotation()
for image in annotation.getElementsByTagName("image"):
for name in image.getElementsByTagName("name"):
anno.imageName = name.firstChild.data
for fn in image.getElementsByTagName("frameNr"):
anno.frameNr = int(fn.firstChild.data)
rects = []
for annoRect in annotation.getElementsByTagName("annorect"):
rect = AnnoRect()
for x1 in annoRect.getElementsByTagName("x1"):
rect.x1 = float(x1.firstChild.data)
for y1 in annoRect.getElementsByTagName("y1"):
rect.y1 = float(y1.firstChild.data)
for x2 in annoRect.getElementsByTagName("x2"):
rect.x2 = float(x2.firstChild.data)
for y2 in annoRect.getElementsByTagName("y2"):
rect.y2 = float(y2.firstChild.data)
for scale in annoRect.getElementsByTagName("scale"):
rect.scale = float(scale.firstChild.data)
for score in annoRect.getElementsByTagName("score"):
rect.score = float(score.firstChild.data)
for classID in annoRect.getElementsByTagName("classID"):
rect.classID = int(classID.firstChild.data)
for track_id in annoRect.getElementsByTagName("track_id"):
rect.track_id = int(track_id.firstChild.data)
for articulation in annoRect.getElementsByTagName("articulation"):
for id in articulation.getElementsByTagName("id"):
rect.articulations.append(int(id.firstChild.data))
#print "Articulations: ", rect.articulations
for viewpoint in annoRect.getElementsByTagName("viewpoint"):
for id in viewpoint.getElementsByTagName("id"):
rect.viewpoints.append(int(id.firstChild.data))
#print "Viewpoints: ", rect.viewpoints
for d in annoRect.getElementsByTagName("D3"):
for id in d.getElementsByTagName("id"):
rect.d3.append(float(id.firstChild.data))
for silhouette in annoRect.getElementsByTagName("silhouette"):
for id in silhouette.getElementsByTagName("id"):
rect.silhouetteID = int(id.firstChild.data)
#print "SilhouetteID: ", rect.silhouetteID
for annoPoints in annoRect.getElementsByTagName("annopoints"):
for annoPoint in annoPoints.getElementsByTagName("point"):
p = AnnoPoint()
for annoPointX in annoPoint.getElementsByTagName("x"):
p.x = int(float(annoPointX.firstChild.data))
for annoPointY in annoPoint.getElementsByTagName("y"):
p.y = int(float(annoPointY.firstChild.data))
for annoPointId in annoPoint.getElementsByTagName("id"):
p.id = int(annoPointId.firstChild.data)
assert (p.x != None and p.y != None and p.id != None)
rect.point.append(p)
rects.append(rect)
anno.rects = rects
annotations.append(anno)
return annotations
def parseJSON(filename):
filename = os.path.realpath(filename)
name, ext = os.path.splitext(filename)
assert ext == '.json'
annotations = AnnoList([])
with open(filename, 'r') as f:
jdoc = json.load(f)
for annotation in jdoc:
anno = Annotation()
anno.imageName = annotation["image_path"]
rects = []
for annoRect in annotation["rects"]:
rect = AnnoRect()
rect.x1 = annoRect["x1"]
rect.x2 = annoRect["x2"]
rect.y1 = annoRect["y1"]
rect.y2 = annoRect["y2"]
if "score" in annoRect:
rect.score = annoRect["score"]
rects.append(rect)
anno.rects = rects
annotations.append(anno)
return annotations
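# --- Illustrative usage (editor's addition) ---
# A minimal sketch of the JSON annotation format read by parseJSON above; the
# file name is hypothetical.
def _example_parse_json():
    doc = [{"image_path": "img_0001.png",
            "rects": [{"x1": 10, "y1": 20, "x2": 110, "y2": 220, "score": 0.9}]}]
    with open('/tmp/example.json', 'w') as f:
        json.dump(doc, f)
    annolist = parseJSON('/tmp/example.json')
    assert annolist[0].rects[0].score == 0.9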
def parse(filename, abs_path=False):
#print "Parsing: ", filename
name, ext = os.path.splitext(filename)
if (ext == ".gz" or ext == ".bz2"):
name, ext = os.path.splitext(name)
if (ext == ".idl"):
annolist = parseIDL(filename)
elif (ext == ".al"):
annolist = parseXML(filename)
elif (ext == ".pal"):
annolist = PalLib.pal2al(PalLib.loadPal(filename))
elif (ext == ".json"):
annolist = parseJSON(filename)
else:
annolist = AnnoList([])
if abs_path:
basedir = os.path.dirname(os.path.abspath(filename))
for a in annolist:
a.imageName = basedir + "/" + os.path.basename(a.imageName)
return annolist
def parseIDL(filename):
filename = os.path.realpath(filename)
name, ext = os.path.splitext(filename)
lines = []
if (ext == ".idl"):
file = open(filename, 'r')
lines = file.readlines()
file.close()
if (ext == ".gz"):
zfile = gzip.GzipFile(filename)
lines = zfile.readlines()
zfile.close()
if (ext == ".bz2"):
bfile = bz2.BZ2File(filename)
lines = bfile.readlines()
bfile.close()
annotations = AnnoList([])
for line in lines:
anno = Annotation()
### remove line break
if (line[-1] == '\n'):
line = line[:-1]
# remove '\n'
lineLen = len(line)
#print line
### get image name
posImageEnd = line.find('\":')
if (posImageEnd == -1):
posImageEnd = line.rfind("\"")
anno.imageName = line[1:posImageEnd]
#print anno.imageName
pos = anno.imageName.rfind("@")
if (pos >= 0):
anno.frameNr = int(anno.imageName[pos + 1:])
anno.imageName = anno.imageName[:pos]
if anno.imageName[-1] == "/":
anno.imageName = anno.imageName[:-1]
else:
anno.frameNr = -1
### get rect list
# we split by ','. there are 3 commas for each rect and 1 comma separating the rects
rectSegs = []
if (posImageEnd != -1 and posImageEnd + 4 < lineLen):
line = line[posImageEnd + 3:-1]
# remove ; or .
segments = line.split(',')
if (len(segments) % 4 != 0):
print("Parse Errror")
else:
for i in range(0, len(segments), 4):
rectSeg = segments[i] + "," + segments[
i + 1
] + "," + segments[i + 2] + "," + segments[i + 3]
rectSegs.append(rectSeg)
#print rectSegs
## parse rect segments
for rectSeg in rectSegs:
#print "RectSeg: ", rectSeg
rect = AnnoRect()
posBracket1 = rectSeg.find('(')
posBracket2 = rectSeg.find(')')
coordinates = rectSeg[posBracket1 + 1:posBracket2].split(',')
#print coordinates
#print "Coordinates: ",coordinates
rect.x1 = float(round(float(coordinates[0].strip())))
rect.y1 = float(round(float(coordinates[1].strip())))
rect.x2 = float(round(float(coordinates[2].strip())))
rect.y2 = float(round(float(coordinates[3].strip())))
posColon = rectSeg.find(':')
posSlash = rectSeg.find('/')
if (posSlash != -1):
rect.silhouetteID = int(rectSeg[posSlash + 1:])
else:
rectSeg += "\n"
if (posColon != -1):
#print rectSeg[posColon+1:posSlash]
rect.score = float(rectSeg[posColon + 1:posSlash])
anno.rects.append(rect)
annotations.append(anno)
return annotations
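# --- Illustrative usage (editor's addition) ---
# A minimal sketch of the IDL line format handled by parseIDL above; the file
# name is hypothetical. Each line is:
#   "image[@frame]": (x1, y1, x2, y2)[:score][/silhouette], ... ;
def _example_parse_idl():
    with open('/tmp/example.idl', 'w') as f:
        f.write('"img_0001.png": (10, 20, 110, 220):0.9, (5, 5, 50, 60);\n')
        f.write('"img_0002.png";\n')
    annolist = parseIDL('/tmp/example.idl')
    assert len(annolist) == 2
    assert len(annolist[0].rects) == 2
    assert annolist[0].rects[0].score == 0.9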
#####################################################################
### Saving
def save(filename, annotations):
print("saving: ", filename)
name, ext = os.path.splitext(filename)
if (ext == ".gz" or ext == ".bz2"):
name, ext = os.path.splitext(name)
if (ext == ".idl"):
return saveIDL(filename, annotations)
elif (ext == '.json'):
return saveJSON(filename, annotations)
elif (ext == ".al"):
return saveXML(filename, annotations)
elif (ext == ".pal"):
return PalLib.savePal(filename, PalLib.al2pal(annotations))
else:
assert (False)
return False
def saveIDL(filename, annotations):
[name, ext] = os.path.splitext(filename)
if (ext == ".idl"):
file = open(filename, 'w')
if (ext == ".gz"):
file = gzip.GzipFile(filename, 'w')
if (ext == ".bz2"):
file = bz2.BZ2File(filename, 'w')
i = 0
for annotation in annotations:
annotation.writeIDL(file)
if (i + 1 < len(annotations)):
file.write(";\n")
else:
file.write(".\n")
i += 1
file.close()
def saveJSON(filename, annotations):
[name, ext] = os.path.splitext(filename)
jdoc = []
for annotation in annotations:
jdoc.append(annotation.writeJSON())
with open(filename, 'w') as f:
f.write(json.dumps(jdoc, indent=2, sort_keys=True))
def idlBase(filename):
if (filename.rfind(".pal") == len(filename) - 4):
return (filename[:-4], ".pal")
if (filename.rfind(".json") == len(filename) - 5):
return (filename[:-5], ".json")
if (filename.rfind(".idl") == len(filename) - 4):
return (filename[:-4], ".idl")
if (filename.rfind(".al") == len(filename) - 3):
return (filename[:-3], ".al")
if (filename.rfind(".idl.gz") == len(filename) - 7):
return (filename[:-7], ".idl.gz")
if (filename.rfind(".idl.bz2") == len(filename) - 8):
return (filename[:-8], ".idl.bz2")
if (filename.rfind(".al.gz") == len(filename) - 6):
return (filename[:-6], ".al.gz")
if (filename.rfind(".al.bz2") == len(filename) - 7):
return (filename[:-7], ".al.bz2")
def saveXML(filename, annotations):
document = xml.dom.minidom.Document()
rootnode = document.createElement("annotationlist")
for anno in annotations:
anno.addToXML(rootnode, document)
document.appendChild(rootnode)
[name, ext] = os.path.splitext(filename)
if (ext == ".al"):
writer = open(filename, 'w')
elif (ext == ".gz"):
writer = gzip.GzipFile(filename, 'w')
elif (ext == ".bz2"):
writer = bz2.BZ2File(filename, 'w')
else:
print("invalid filename - .al(.gz|.bz2) is accepted")
return
if xml_dom_ext_available:
xml.dom.ext.PrettyPrint(document, writer)
else:
# MA: skip header (currently Matlab's loadannotations can't deal with the header)
document.documentElement.writexml(writer)
#document.writexml(writer)
document.unlink()
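# --- Illustrative usage (editor's addition) ---
# A minimal save/parse round trip through the ".al" XML format handled by
# saveXML and parseXML above; paths are hypothetical.
def _example_xml_roundtrip():
    anno = Annotation()
    anno.imageName = 'img_0001.png'
    r = AnnoRect(10, 20, 110, 220)
    r.score = 0.9
    anno.rects = [r]
    saveXML('/tmp/example.al', [anno])
    parsed = parseXML('/tmp/example.al')
    assert parsed[0].imageName == 'img_0001.png'
    assert parsed[0].rects[0].x2 == 110.0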
#####################################################################
### Statistics
def getStats(annotations):
no = 0
noTiny = 0
noSmall = 0
heights = []
widths = []
###--- get all rects ---###
for anno in annotations:
no = no + len(anno.rects)
for rect in anno.rects:
if (rect.height() < 36):
noTiny = noTiny + 1
if (rect.height() < 128):
noSmall = noSmall + 1
heights.append(rect.height())
if (rect.width() == 0):
print("Warning: width=0 in image ", anno.imageName)
widths.append(1)
else:
widths.append(rect.width())
if (float(rect.height()) / float(rect.width()) < 1.5):
print(
"Degenerated pedestrian annotation: ", anno.imageName
)
###--- compute average height and variance ---###
avgHeight = 0
varHeight = 0
minHeight = 0
maxHeight = 0
if len(heights) > 0:
minHeight = heights[0]
maxHeight = heights[0]
for height in heights:
avgHeight = avgHeight + height
if (height > maxHeight):
maxHeight = height
if (height < minHeight):
minHeight = height
if (no > 0):
avgHeight = avgHeight / no
for height in heights:
varHeight += (height - avgHeight) * (height - avgHeight)
if (no > 1):
varHeight = float(varHeight) / float(no - 1)
###--- compute average width and variance ---###
avgWidth = 0
varWidth = 0
for width in widths:
avgWidth = avgWidth + width
if (no > 0):
avgWidth = avgWidth / no
for width in widths:
varWidth += (width - avgWidth) * (width - avgWidth)
if (no > 1):
varWidth = float(varWidth) / float(no - 1)
###--- write statistics ---###
print(" Total # rects:", no)
print(
" avg. Width:", avgWidth, " (",
sqrt(varWidth), "standard deviation )"
)
print(
" avg. Height:", avgHeight, " (",
sqrt(varHeight), "standard deviation )"
)
print(" tiny rects:", noTiny, " (< 36 pixels)")
print(" small rects:", noSmall, " (< 128 pixels)")
print(" minimum height:", minHeight)
print(" maximum height:", maxHeight)
###--- return ---###
return [widths, heights]
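# --- Illustrative usage (editor's addition) ---
# getStats prints summary statistics for a parsed annotation list and returns
# the raw width/height samples; the file name is hypothetical:
#
#   widths, heights = getStats(parse('annotations.al'))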
############################################################
##
## IDL merging
##
def mergeIDL(detIDL, det2IDL, detectionFuse=True, minOverlap=0.5):
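    """Merge det2IDL into detIDL frame by frame.
    With detectionFuse=True, rects from det2IDL that overlap an existing rect
    by more than minOverlap (Pascal criterion) are dropped as duplicates;
    with detectionFuse=False all rects are kept.
    """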
mergedIDL = []
for i, anno in enumerate(detIDL):
mergedAnno = Annotation()
mergedAnno.imageName = anno.imageName
mergedAnno.frameNr = anno.frameNr
        mergedAnno.rects = list(anno.rects)  # copy so appends below don't mutate the input
imageFound = False
filterIndex = -1
for i, filterAnno in enumerate(det2IDL):
if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
filterIndex = i
imageFound = True
break
if (not imageFound):
mergedIDL.append(mergedAnno)
continue
for rect in det2IDL[filterIndex].rects:
matches = False
for frect in anno.rects:
if rect.overlap_pascal(frect) > minOverlap:
matches = True
break
if (not matches or detectionFuse == False):
mergedAnno.rects.append(rect)
mergedIDL.append(mergedAnno)
return mergedIDL
############################################################################
#
# Function to force the aspect ratio of annotations to ratio = width / height
#
#
def forceAspectRatio(annotations, ratio, KeepHeight=False, KeepWidth=False):
for anno in annotations:
for rect in anno.rects:
rect.forceAspectRatio(ratio, KeepHeight, KeepWidth)
#Determine which side needs to be extended
# if (rect.width() * 1.0 / rect.height() > ratio):
#
# #Too wide -> extend height
# newHeight = rect.width() * 1.0 / ratio
# rect.y1 = int(rect.centerY() - newHeight / 2.0)
# rect.y2 = int(rect.y1 + newHeight)
#
# else:
# #Too short -> extend width
# newWidth = rect.height() * ratio
# rect.x1 = int(rect.centerX() - newWidth / 2.0)
# rect.x2 = int(rect.x1 + newWidth)
###################################################################
# Function to greedyly remove subset detIDL from gtIDL
#
# returns two sets
#
# [filteredIDL, missingRecallIDL]
#
# filteredIDL == Rects that were present in both sets
# missingRecallIDL == Rects that were only present in set gtIDL
#
###################################################################
def extractSubSet(gtIDL, detIDL):
filteredIDL = []
missingRecallIDL = []
for i, gtAnno in enumerate(gtIDL):
filteredAnno = Annotation()
filteredAnno.imageName = gtAnno.imageName
filteredAnno.frameNr = gtAnno.frameNr
missingRecallAnno = Annotation()
missingRecallAnno.imageName = gtAnno.imageName
missingRecallAnno.frameNr = gtAnno.frameNr
imageFound = False
filterIndex = -1
for i, anno in enumerate(detIDL):
if (suffixMatch(anno.imageName, gtAnno.imageName) and anno.frameNr == gtAnno.frameNr):
filterIndex = i
imageFound = True
break
if (not imageFound):
print("Image not found " + gtAnno.imageName + " !")
missingRecallIDL.append(gtAnno)
filteredIDL.append(filteredAnno)
continue
matched = [-1] * len(detIDL[filterIndex].rects)
for j, rect in enumerate(gtAnno.rects):
matches = False
matchingID = -1
minCenterPointDist = -1
for k, frect in enumerate(detIDL[filterIndex].rects):
minCover = 0.5
minOverlap = 0.5
maxDist = 0.5
if rect.isMatchingStd(frect, minCover, minOverlap, maxDist):
if (matchingID == -1 or rect.distance(frect) < minCenterPointDist):
matchingID = k
minCenterPointDist = rect.distance(frect)
matches = True
if (matches):
#Already matched once check if you are the better match
if (matched[matchingID] >= 0):
#Take the match with the smaller center point distance
                    matchedRect = detIDL[filterIndex].rects[matchingID]
                    if (gtAnno.rects[matched[matchingID]].distance(matchedRect) > rect.distance(matchedRect)):
missingRecallAnno.rects.append(
gtAnno.rects[matched[matchingID]]
)
filteredAnno.rects.remove(
gtAnno.rects[matched[matchingID]]
)
filteredAnno.rects.append(rect)
matched[matchingID] = j
else:
missingRecallAnno.rects.append(rect)
else:
#Not matched before.. go on and add the match
filteredAnno.rects.append(rect)
matched[matchingID] = j
else:
missingRecallAnno.rects.append(rect)
filteredIDL.append(filteredAnno)
missingRecallIDL.append(missingRecallAnno)
return (filteredIDL, missingRecallIDL)
###########################################################
#
# Function to remove all detections with a too low score
#
#
def filterMinScore(detections, minScore):
newDetections = []
for anno in detections:
newAnno = Annotation()
newAnno.frameNr = anno.frameNr
newAnno.imageName = anno.imageName
newAnno.imagePath = anno.imagePath
newAnno.rects = []
for rect in anno.rects:
if (rect.score >= minScore):
newAnno.rects.append(rect)
newDetections.append(newAnno)
return newDetections
# foo.idl -> foo-suffix.idl, foo.idl.gz -> foo-suffix.idl.gz etc
def suffixIdlFileName(filename, suffix):
exts = [".idl", ".idl.gz", ".idl.bz2"]
for ext in exts:
if filename.endswith(ext):
return filename[0:-len(ext)] + "-" + suffix + ext
raise ValueError(
"this does not seem to be a valid filename for an idl-file"
)
if __name__ == "__main__":
# test output
idl = parseIDL("/tmp/asdf.idl")
idl[0].rects[0].articulations = [4, 2]
idl[0].rects[0].viewpoints = [2, 3]
saveXML("", idl)
def annoAnalyze(detIDL):
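    """Flatten detIDL into an AnnoList of single-rect annotations, ordered
    by score (via cmpDetAnnoRectsByScore)."""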
allRects = []
for i, anno in enumerate(detIDL):
for j in anno.rects:
newRect = detAnnoRect()
newRect.imageName = anno.imageName
newRect.frameNr = anno.frameNr
newRect.rect = j
allRects.append(newRect)
    import functools  # list.sort() in Python 3 takes a key, not a cmp function
    allRects.sort(key=functools.cmp_to_key(cmpDetAnnoRectsByScore))
filteredIDL = AnnoList([])
for i in allRects:
a = Annotation()
a.imageName = i.imageName
a.frameNr = i.frameNr
a.rects = []
a.rects.append(i.rect)
filteredIDL.append(a)
return filteredIDL
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/AnnotationLib.py |
from . import AnnoList_pb2
from . import AnnotationLib
def loadPal(filename):
_annolist = AnnoList_pb2.AnnoList()
f = open(filename, "rb")
_annolist.ParseFromString(f.read())
f.close()
return _annolist
def savePal(filename, _annolist):
f = open(filename, "wb")
f.write(_annolist.SerializeToString())
f.close()
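# Minimal round-trip sketch ("example.pal" is a hypothetical path; .pal files
# hold a binary protobuf AnnoList message):
#
#   _annolist = loadPal("example.pal")
#   savePal("example-copy.pal", _annolist)
#
# al2pal()/pal2al() below convert between AnnotationLib.AnnoList objects and
# this protobuf representation.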
def al2pal(annotations):
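    """Convert an AnnotationLib.AnnoList into its protobuf representation."""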
_annolist = AnnoList_pb2.AnnoList()
#assert(isinstance(annotations, AnnotationLib.AnnoList));
# check type of attributes, add missing attributes
for a in annotations:
for r in a.rects:
for k, v in r.at.items():
if not k in annotations.attribute_desc:
annotations.add_attribute(k, type(v))
else:
assert (
AnnotationLib.is_compatible_attr_type(
annotations.attribute_desc[k].dtype, type(v)
)
)
# check attributes values
for a in annotations:
for r in a.rects:
for k, v in r.at.items():
if k in annotations.attribute_val_to_str:
# don't allow undefined values
if not v in annotations.attribute_val_to_str[k]:
print(
"attribute: {}, undefined value: {}".format(k, v)
)
assert (False)
# store attribute descriptions in pal structure
for aname, adesc in annotations.attribute_desc.items():
_annolist.attribute_desc.extend([adesc])
for a in annotations:
_a = _annolist.annotation.add()
_a.imageName = a.imageName
for r in a.rects:
_r = _a.rect.add()
_r.x1 = r.x1
_r.y1 = r.y1
_r.x2 = r.x2
_r.y2 = r.y2
_r.score = float(r.score)
if hasattr(r, 'id'):
_r.id = r.id
if hasattr(r, 'track_id'):
_r.track_id = r.track_id
if hasattr(r, 'at'):
for k, v in list(r.at.items()):
_at = _r.attribute.add()
_at.id = annotations.attribute_desc[k].id
if annotations.attribute_desc[
k
].dtype == AnnotationLib.AnnoList.TYPE_INT32:
assert (
AnnotationLib.is_compatible_attr_type(
AnnotationLib.AnnoList.TYPE_INT32, type(v)
)
)
_at.val = int(v)
elif annotations.attribute_desc[
k
].dtype == AnnotationLib.AnnoList.TYPE_FLOAT:
assert (
AnnotationLib.is_compatible_attr_type(
AnnotationLib.AnnoList.TYPE_FLOAT, type(v)
)
)
_at.fval = float(v)
elif annotations.attribute_desc[
k
].dtype == AnnotationLib.AnnoList.TYPE_STRING:
assert (
AnnotationLib.is_compatible_attr_type(
AnnotationLib.AnnoList.TYPE_STRING, type(v)
)
)
_at.strval = str(v)
else:
                    assert (False)
return _annolist
def pal2al(_annolist):
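    """Convert a protobuf AnnoList back into an AnnotationLib.AnnoList."""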
#annotations = [];
annotations = AnnotationLib.AnnoList()
for adesc in _annolist.attribute_desc:
annotations.attribute_desc[adesc.name] = adesc
print("attribute: ", adesc.name, adesc.id)
for valdesc in adesc.val_to_str:
annotations.add_attribute_val(adesc.name, valdesc.s, valdesc.id)
attribute_name_from_id = {
adesc.id: aname
for aname, adesc in annotations.attribute_desc.items()
}
attribute_dtype_from_id = {
adesc.id: adesc.dtype
for aname, adesc in annotations.attribute_desc.items()
}
for _a in _annolist.annotation:
anno = AnnotationLib.Annotation()
anno.imageName = _a.imageName
anno.rects = []
for _r in _a.rect:
rect = AnnotationLib.AnnoRect()
rect.x1 = _r.x1
rect.x2 = _r.x2
rect.y1 = _r.y1
rect.y2 = _r.y2
if _r.HasField("id"):
rect.id = _r.id
if _r.HasField("track_id"):
rect.track_id = _r.track_id
if _r.HasField("score"):
rect.score = _r.score
for _at in _r.attribute:
try:
cur_aname = attribute_name_from_id[_at.id]
cur_dtype = attribute_dtype_from_id[_at.id]
except KeyError as e:
print("attribute: ", _at.id)
print(e)
assert (False)
if cur_dtype == AnnotationLib.AnnoList.TYPE_INT32:
rect.at[cur_aname] = _at.val
elif cur_dtype == AnnotationLib.AnnoList.TYPE_FLOAT:
rect.at[cur_aname] = _at.fval
elif cur_dtype == AnnotationLib.AnnoList.TYPE_STRING:
rect.at[cur_aname] = _at.strval
else:
assert (False)
anno.rects.append(rect)
annotations.append(anno)
return annotations
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/PalLib.py |
def is_number(s):
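    """Return True if s can be parsed as a float (e.g. "3", "-2.5", "1e-3")."""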
try:
float(s)
return True
except ValueError:
return False
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/ma_utils.py |
#!/usr/bin/env python
import os, sys
from AnnotationLib import *
from optparse import OptionParser
import copy
import math
# BASED ON WIKIPEDIA VERSION
# n - number of nodes
# C - capacity matrix
# F - flow matrix
# s - source
# t - sink
# sumC - sum over rows of C (to speed up computation)
def edmonds_karp(n, C, s, t, sumC):
# Residual capacity from u to v is C[u][v] - F[u][v]
F = [[0] * n for i in xrange(n)]
while True:
P = [-1] * n # Parent table
P[s] = s
M = [0] * n # Capacity of path to node
M[s] = float('infinity')
Q = [s] # BFS queue
while Q:
u = Q.pop(0)
for v in xrange(n):
# There is available capacity,
# and v is not seen before in search
if C[u][v] - F[u][v] > 0 and P[v] == -1:
P[v] = u
M[v] = min(M[u], C[u][v] - F[u][v])
                    if v != t:
                        # only expand v if it has outgoing capacity (speedup)
                        if (sumC[v] > 0):
                            Q.append(v)
else:
# Backtrack search, and write flow
while P[v] != v:
u = P[v]
F[u][v] += M[t]
F[v][u] -= M[t]
v = u
Q = None
break
if P[t] == -1: # We did not find a path to t
return (F)
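# Minimal usage sketch (hypothetical 4-node graph with source 0 and sink 3;
# sumC precomputes row sums of C so dead-end nodes are never expanded):
#
#   C = [[0, 1, 1, 0],
#        [0, 0, 1, 1],
#        [0, 0, 0, 1],
#        [0, 0, 0, 0]]
#   F = edmonds_karp(4, C, 0, 3, [sum(row) for row in C])
#   # F[u][v] holds the resulting flow on edge (u, v)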
class AnnoGraph:
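    """Bipartite matching between detections and ground truth via max-flow.
    Nodes are a source, one node per detection, one node per ground-truth
    rect, and a sink; every edge has capacity 1, so the max flow equals the
    number of matched detection/ground-truth pairs. Matches to "ignore"
    rects are tracked separately and excluded from the returned counts.
    """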
def __init__(
self, anno, det, ignore, style, minCover, minOverlap, maxDistance,
ignoreOverlap
):
# setting rects
#print anno.imageName
self.anno = anno
self.det = det
self.det.sortByScore("descending")
# generate initial graph
self.n = len(det.rects)
self.m = len(anno.rects)
# Number of nodes = number of detections + number of GT + source + sink
self.a = self.n + self.m + 2
# Flow matrix
self.F = [[0] * self.a for i in xrange(self.a)]
# Capacity matrix
self.C = [[0] * self.a for i in xrange(self.a)]
# Connect source to all detections
for i in range(1, self.n + 1):
self.C[0][i] = 1
self.C[i][0] = 1
# Connect sink to all GT
for i in range(self.n + 1, self.a - 1):
self.C[i][self.a - 1] = 1
self.C[self.a - 1][i] = 1
# Overall flow
self.full_flow = 0
self.ignore_flow = 0
# match rects / Adjacency matrix
self.M = [[] for i in xrange(self.n)]
self.match(style, minCover, minOverlap, maxDistance)
self.nextN = 0
# Deactivate All Non Matching detections
# Save row sums for capacity matrix
self.sumC = []
self.sumC.append(self.n)
for q in [len(self.M[j]) for j in xrange(len(self.M))]:
self.sumC.append(q)
for q in [1] * self.m:
self.sumC.append(q)
# Initially no links are active
self.sumC_active = []
self.sumC_active.append(self.n)
for q in [len(self.M[j]) for j in xrange(len(self.M))]:
self.sumC_active.append(0)
for q in [1] * self.m:
self.sumC_active.append(q)
#
self.ignore = [0] * self.m
for ig in ignore.rects:
for i, r in enumerate(anno.rects):
if (ig.overlap_pascal(r) > ignoreOverlap):
self.ignore[i] = 1
def match(self, style, minCover, minOverlap, maxDistance):
for i in xrange(self.n):
detRect = self.det.rects[i]
for j in xrange(self.m):
annoRect = self.anno.rects[j]
# Bastian Leibe's matching style
if (style == 0):
assert False
if detRect.isMatchingStd(
annoRect, minCover, minOverlap, maxDistance
):
self.M[i].append(self.n + 1 + j)
# Pascal Matching style
if (style == 1):
if (detRect.isMatchingPascal(annoRect, minOverlap)):
self.M[i].append(self.n + 1 + j)
def decreaseScore(self, score):
capacity_change = False
for i in xrange(self.nextN, self.n):
if (self.det.rects[i].score >= score):
capacity_change = self.insertIntoC(i + 1) or capacity_change
self.nextN += 1
else:
break
if capacity_change:
self.F = edmonds_karp(
self.a, self.C, 0, self.a - 1, self.sumC_active
)
self.full_flow = sum([self.F[0][i] for i in xrange(self.a)])
self.ignore_flow = sum(
[
self.F[i][self.a - 1] * self.ignore[i - 1 - self.n]
for i in range(1 + self.n, 1 + self.n + self.m)
]
)
return capacity_change
def addBB(self, rect):
self.nextN += 1
capacity_change = self.insertIntoC(rect.boxIndex + 1)
if capacity_change:
self.F = edmonds_karp(
self.a, self.C, 0, self.a - 1, self.sumC_active
)
self.full_flow = sum([self.F[0][i] for i in xrange(self.a)])
self.ignore_flow = sum(
[
self.F[i][self.a - 1] * self.ignore[i - 1 - self.n]
for i in range(1 + self.n, 1 + self.n + self.m)
]
)
return capacity_change
def insertIntoC(self, i):
#print "Inserting node", i, self.det.rects[i-1].score, "of image", self.anno.imageName
for match in self.M[i - 1]:
#print " match: ", match
self.C[i][match] = 1
self.C[match][i] = 1
self.sumC_active[i] = self.sumC[i]
return self.sumC[i] > 0
def maxflow(self):
return self.full_flow - self.ignore_flow
def consideredDets(self):
return self.nextN - self.ignore_flow
def ignoredFlow(self):
return self.ignore_flow
def getTruePositives(self):
ret = copy.copy(self.anno)
ret.rects = []
#iterate over GT
for i in xrange(self.n + 1, self.a - 1):
#Flow to sink > 0
if(self.F[i][self.a - 1] > 0 and self.ignore[i - self.n - 1] == 0):
#Find associated det
for j in xrange(1, self.n + 1):
if (self.F[j][i] > 0):
ret.rects.append(self.det[j - 1])
break
return ret
def getIgnoredTruePositives(self):
ret = copy.copy(self.anno)
ret.rects = []
#iterate over GT
for i in xrange(self.n + 1, self.a - 1):
#Flow to sink > 0
if(self.F[i][self.a - 1] > 0 and self.ignore[i - self.n - 1] == 1):
#Find associated det
for j in xrange(1, self.n + 1):
if (self.F[j][i] > 0):
ret.rects.append(self.det[j - 1])
break
return ret
def getMissingRecall(self):
ret = copy.copy(self.anno)
ret.rects = []
for i in xrange(self.n + 1, self.a - 1):
if(self.F[i][self.a - 1] == 0 and self.ignore[i - self.n - 1] == 0):
ret.rects.append(self.anno.rects[i - self.n - 1])
return ret
def getFalsePositives(self):
ret = copy.copy(self.det)
ret.rects = []
for i in xrange(1, self.n + 1):
if (self.F[0][i] == 0):
ret.rects.append(self.det[i - 1])
return ret
def asort(
idlGT,
idlDet,
minWidth,
minHeight,
style,
minCover,
minOverlap,
maxDistance,
maxWidth=float('inf'),
maxHeight=float('inf')
):
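    """Drop ("asort") ground-truth rects outside the width/height bounds,
    together with detections that matched only those rects; out-of-range
    detections are kept only if they match a remaining ground-truth rect.
    """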
#Asort too small object in ground truth
for x, anno in enumerate(idlGT):
imageFound = False
filterIndex = -1
for i, filterAnno in enumerate(idlDet):
if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
filterIndex = i
imageFound = True
break
if (not imageFound):
continue
validGTRects = []
for j in anno.rects:
if (j.width() >= minWidth) and (j.height() >= minHeight) and (
j.width() <= maxWidth
) and (j.height() <= maxHeight):
validGTRects.append(j)
else:
# Sort out detections that would have matched
matchingIndexes = []
for m, frect in enumerate(idlDet[filterIndex].rects):
if (style == 0):
                        if (j.isMatchingStd(frect, minCover, minOverlap, maxDistance)):
overlap = j.overlap_pascal(frect)
matchingIndexes.append((m, overlap))
if (style == 1):
if (j.isMatchingPascal(frect, minOverlap)):
overlap = j.overlap_pascal(frect)
matchingIndexes.append((m, overlap))
for m in xrange(len(matchingIndexes) - 1, -1, -1):
matching_rect = idlDet[filterIndex
].rects[matchingIndexes[m][0]]
matching_overlap = matchingIndexes[m][1]
better_overlap_found = False
for l in anno.rects:
if l.overlap_pascal(matching_rect) > matching_overlap:
better_overlap_found = True
if better_overlap_found:
continue
del idlDet[filterIndex].rects[matchingIndexes[m][0]]
idlGT[x].rects = validGTRects
#Sort out too small false positives
for x, anno in enumerate(idlDet):
imageFound = False
filterIndex = -1
for i, filterAnno in enumerate(idlGT):
if (suffixMatch(anno.imageName, filterAnno.imageName) and anno.frameNr == filterAnno.frameNr):
filterIndex = i
imageFound = True
break
if (not imageFound):
continue
validDetRects = []
for j in anno.rects:
if (j.width() >= minWidth) and (j.height() >= minHeight) and (
j.width() <= maxWidth
) and (j.height() <= maxHeight):
validDetRects.append(j)
else:
for frect in idlGT[filterIndex].rects:
if (style == 0):
if j.isMatchingStd(
frect, minCover, minOverlap, maxDistance
):
validDetRects.append(j)
if (style == 1):
if (j.isMatchingPascal(frect, minOverlap)):
validDetRects.append(j)
idlDet[x].rects = validDetRects
# MA: simplified version that does Pascal style matching with one parameter controlling "intersection-over-union" matching threshold
def comp_prec_recall(annoIDL, detIDL, minOverlap):
ignoreIDL = copy.deepcopy(annoIDL)
for anno in ignoreIDL:
anno.rects = []
precs, recalls, scores, fppi, graphs = comp_prec_recall_all_params(
annoIDL, detIDL, ignoreIDL, minOverlap=minOverlap
)
return precs, recalls, scores, fppi
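# Minimal usage sketch (hypothetical file names):
#
#   annoIDL = parse("groundtruth.idl")
#   detIDL = parse("detections.idl")
#   precs, recalls, scores, fppi = comp_prec_recall(annoIDL, detIDL, 0.5)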
def comp_prec_recall_graphs(annoIDL, detIDL, minOverlap):
ignoreIDL = copy.deepcopy(annoIDL)
for anno in ignoreIDL:
anno.rects = []
precs, recalls, scores, fppi, graphs = comp_prec_recall_all_params(
annoIDL, detIDL, ignoreIDL, minOverlap=minOverlap
)
return graphs
# MA: full version
def comp_prec_recall_all_params(
annoIDL,
detIDL,
ignoreIDL,
minWidth=0,
minHeight=0,
maxWidth=float('inf'),
maxHeight=float('inf'),
matchingStyle=1,
minCover=0.5,
minOverlap=0.5,
maxDistance=0.5,
ignoreOverlap=0.9,
verbose=False
):
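    """Full precision/recall computation.
    Returns (precs, recalls, scores, fppi, graphs), where graphs holds one
    AnnoGraph per annotated frame for later failure-case analysis.
    """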
# Asort detections which are too small/too big
if verbose:
print "Asorting too large/ too small detections"
print "minWidth:", minWidth
print "minHeight:", minHeight
print "maxWidth: ", maxWidth
print "maxHeight: ", maxHeight
asort(
annoIDL, detIDL, minWidth, minHeight, matchingStyle, minCover,
minOverlap, maxDistance, maxWidth, maxHeight
)
#Debugging asort
#saveIDL("testGT.idl", annoIDL)
#saveIDL("testDET.idl", detIDL)
noAnnotations = 0
for anno in annoIDL:
for j, detAnno in enumerate(detIDL):
if (suffixMatch(anno.imageName, detIDL[j].imageName) and anno.frameNr == detIDL[j].frameNr):
noAnnotations = noAnnotations + len(anno.rects)
break
if verbose:
print "#Annotations:", noAnnotations
###--- set up graphs ---###
print "Setting up graphs ..."
graphs = []
allRects = []
missingFrames = 0
for i in xrange(len(annoIDL)):
imageFound = False
filterIndex = -1
for j, detAnno in enumerate(detIDL):
if (suffixMatch(annoIDL[i].imageName, detIDL[j].imageName) and annoIDL[i].frameNr == detIDL[j].frameNr):
filterIndex = j
imageFound = True
break
if (not imageFound):
print "No annotation/detection pair found for: " + annoIDL[
i
].imageName + " frame: " + str(annoIDL[i].frameNr)
missingFrames += 1
continue
graphs.append(
AnnoGraph(
annoIDL[i], detIDL[filterIndex], ignoreIDL[i], matchingStyle,
minCover, minOverlap, maxDistance, ignoreOverlap
)
)
for j, rect in enumerate(detIDL[filterIndex]):
newRect = detAnnoRect()
            newRect.imageName = annoIDL[i].imageName
            newRect.frameNr = annoIDL[i].frameNr
newRect.rect = rect
newRect.imageIndex = i - missingFrames
newRect.boxIndex = j
allRects.append(newRect)
if verbose:
print "missingFrames: ", missingFrames
print "Number of detections on annotated frames: ", len(allRects)
###--- get scores from all rects ---###
print "Sorting scores ..."
allRects.sort(cmpDetAnnoRectsByScore)
allRects.reverse()
###--- gradually decrease score ---###
if verbose:
print "Gradually decrease score ..."
lastScore = float('infinity')
precs = [1.0]
recalls = [0.0]
#fppi = [ 10**(math.floor(math.log(1.0 / float(len(annoIDL)))/math.log(10) * 10.0) / 10.0) ]
fppi = [1.0 / float(len(annoIDL))]
scores = [lastScore]
numDet = len(allRects)
sf = lastsf = 0
cd = lastcd = 0
iflow = lastiflow = 0
changed = False
firstFP = True
for i, nextrect in enumerate(allRects):
score = nextrect.rect.score
# updating true and false positive counts
sf = sf - graphs[nextrect.imageIndex].maxflow()
cd = cd - graphs[nextrect.imageIndex].consideredDets()
iflow = iflow - graphs[nextrect.imageIndex].ignoredFlow()
#changed = changed or graphs[nextrect.imageIndex].decreaseScore(score)
changed = graphs[nextrect.imageIndex].addBB(nextrect) or changed
sf = sf + graphs[nextrect.imageIndex].maxflow()
cd = cd + graphs[nextrect.imageIndex].consideredDets()
iflow = iflow + graphs[nextrect.imageIndex].ignoredFlow()
if (firstFP and cd - sf != 0):
firstFP = False
changed = True
if (i == numDet - 1 or score != allRects[i + 1].rect.score or firstFP or i == len(allRects)):
if (changed or i == numDet - 1 or i == len(allRects)):
if (lastcd > 0):
scores.append(lastScore)
recalls.append(
float(lastsf) / float(noAnnotations - lastiflow)
)
precs.append(float(lastsf) / float(lastcd))
fppi.append(float(lastcd - lastsf) / float(len(annoIDL)))
if (cd > 0):
scores.append(score)
recalls.append(float(sf) / float(noAnnotations - iflow))
precs.append(float(sf) / float(cd))
fppi.append(float(cd - sf) / float(len(annoIDL)))
changed = False
lastScore = score
lastsf = sf
lastcd = cd
lastiflow = iflow
return precs, recalls, scores, fppi, graphs
def main():
parser = OptionParser(
usage="usage: %prog [options] <groundTruthIdl> <detectionIdl>"
)
parser.add_option(
"-o", "--outFile", action="store", type="string", dest="outFile"
)
parser.add_option(
"-a",
"--analysisFiles",
action="store",
type="string",
dest="analysisFile"
)
parser.add_option(
"-s", "--minScore", action="store", type="float", dest="minScore"
)
parser.add_option(
"-w",
"--minWidth",
action="store",
type="int",
dest="minWidth",
default=0
)
parser.add_option(
"-u",
"--minHeight",
action="store",
type="int",
dest="minHeight",
default=0
)
parser.add_option(
"--maxWidth",
action="store",
type="float",
dest="maxWidth",
default=float('inf')
)
parser.add_option(
"--maxHeight",
action="store",
type="float",
dest="maxHeight",
default=float('inf')
)
parser.add_option(
"-r",
"--fixAspectRatio",
action="store",
type="float",
dest="aspectRatio"
)
parser.add_option(
"-p", "--Pascal-Style", action="store_true", dest="pascalStyle"
)
parser.add_option(
"-l",
"--Leibe-Seemann-Matching-Style",
action="store_true",
dest="leibeStyle"
)
parser.add_option(
"--minCover",
action="store",
type="float",
dest="minCover",
default=0.5
)
parser.add_option(
"--maxDistance",
action="store",
type="float",
dest="maxDistance",
default=0.5
)
parser.add_option(
"--minOverlap",
action="store",
type="float",
dest="minOverlap",
default=0.5
)
parser.add_option(
"--clipToImageWidth",
action="store",
type="float",
dest="clipWidth",
default=None
)
parser.add_option(
"--clipToImageHeight",
action="store",
type="float",
dest="clipHeight",
default=None
)
parser.add_option(
"-d", "--dropFirst", action="store_true", dest="dropFirst"
)
#parser.add_option("-c", "--class", action="store", type="int", dest="classID", default=-1)
parser.add_option(
"-c",
"--class",
action="store",
type="int",
dest="classID",
default=None
)
parser.add_option(
"-i", "--ignore", action="store", type="string", dest="ignoreFile"
)
parser.add_option(
"--ignoreOverlap",
action="store",
type="float",
dest="ignoreOverlap",
default=0.9
)
(options, args) = parser.parse_args()
if (len(args) < 2):
print "Please specify annotation and detection as arguments!"
parser.print_help()
sys.exit(1)
annoFile = args[0]
# First figure out the minimum height and width we are dealing with
minWidth = options.minWidth
minHeight = options.minHeight
maxWidth = options.maxWidth
maxHeight = options.maxHeight
print "Minimum width: %d height: %d" % (minWidth, minHeight)
# Load files
annoIDL = parse(annoFile)
detIDL = []
for dets in args[1:]:
detIDL += parse(dets)
if options.ignoreFile != None:
ignoreIDL = parse(options.ignoreFile)
else:
ignoreIDL = copy.deepcopy(annoIDL)
for anno in ignoreIDL:
anno.rects = []
if (options.classID is not None):
for anno in annoIDL:
anno.rects = [
rect for rect in anno.rects
if (rect.classID == options.classID or rect.classID == -1)
]
for anno in detIDL:
anno.rects = [
rect for rect in anno.rects
if (rect.classID == options.classID or rect.classID == -1)
]
for anno in ignoreIDL:
anno.rects = [
rect for rect in anno.rects
if (rect.classID == options.classID or rect.classID == -1)
]
# prevent division by zero when fixing aspect ratio
for anno in annoIDL:
anno.rects = [
rect for rect in anno.rects
if rect.width() > 0 and rect.height() > 0
]
for anno in detIDL:
anno.rects = [
rect for rect in anno.rects
if rect.width() > 0 and rect.height() > 0
]
for anno in ignoreIDL:
anno.rects = [
rect for rect in anno.rects
if rect.width() > 0 and rect.height() > 0
]
# Fix aspect ratio
if (not options.aspectRatio == None):
forceAspectRatio(annoIDL, options.aspectRatio)
forceAspectRatio(detIDL, options.aspectRatio)
forceAspectRatio(ignoreIDL, options.aspectRatio)
# Deselect detections with too low score
if (not options.minScore == None):
for i, anno in enumerate(detIDL):
validRects = []
for rect in anno.rects:
if (rect.score >= options.minScore):
validRects.append(rect)
anno.rects = validRects
# Clip detections to the image dimensions
if (options.clipWidth != None or options.clipHeight != None):
min_x = -float('inf')
min_y = -float('inf')
max_x = float('inf')
max_y = float('inf')
if (options.clipWidth != None):
min_x = 0
max_x = options.clipWidth
if (options.clipHeight != None):
min_y = 0
max_y = options.clipHeight
print "Clipping width: (%.02f-%.02f); clipping height: (%.02f-%.02f)" % (
min_x, max_x, min_y, max_y
)
for anno in annoIDL:
for rect in anno:
rect.clipToImage(min_x, max_x, min_y, max_y)
for anno in detIDL:
for rect in anno:
rect.clipToImage(min_x, max_x, min_y, max_y)
# Setup matching style; standard is Pascal
# style
matchingStyle = 1
# Pascal style
if (options.pascalStyle == True):
matchingStyle = 1
if (options.leibeStyle == True):
matchingStyle = 0
if (options.pascalStyle and options.leibeStyle):
print "Conflicting matching styles!"
sys.exit(1)
if (options.dropFirst == True):
print "Drop first frame of each sequence..."
newIDL = []
for i, anno in enumerate(detIDL):
if (i > 1 and detIDL[i].frameNr == detIDL[i-1].frameNr + 1 and detIDL[i].frameNr == detIDL[i-2].frameNr + 2 and detIDL[i].frameNr == detIDL[i-3].frameNr + 3 and detIDL[i].frameNr == detIDL[i-4].frameNr + 4):
newIDL.append(anno)
detIDL = newIDL
verbose = True
precs, recalls, scores, fppi, graphs = comp_prec_recall_all_params(
annoIDL,
detIDL,
ignoreIDL,
minWidth=options.minWidth,
minHeight=options.minHeight,
maxWidth=options.maxWidth,
maxHeight=options.maxHeight,
matchingStyle=matchingStyle,
minCover=options.minCover,
minOverlap=options.minOverlap,
maxDistance=options.maxDistance,
ignoreOverlap=options.ignoreOverlap,
verbose=verbose
)
###--- output to file ---###
outfilename = options.outFile
if outfilename is None:
outputDir = os.path.dirname(os.path.abspath(args[1]))
outputFile = os.path.basename(os.path.abspath(args[1]))
[base, ext] = idlBase(outputFile)
#outfilename = outputDir + "/rpc-" + base + ".txt"
outfilename = outputDir + "/rpc-" + base + "_overlap" + str(
options.minOverlap
) + ".txt"
print "saving:\n" + outfilename
file = open(outfilename, 'w')
for i in xrange(len(precs)):
file.write(
str(precs[i]) + " " + str(recalls[i]) + " " + str(scores[i]) + " "
+ str(fppi[i]) + "\n"
)
file.close()
# Extracting failure cases
if (options.analysisFile != None):
anaPrefix = options.analysisFile
falsePositives = AnnoList([])
truePositives = AnnoList([])
missingRecall = AnnoList([])
ignoredTruePositives = AnnoList([])
for i in xrange(len(graphs)):
falsePositives.append(graphs[i].getFalsePositives())
truePositives.append(graphs[i].getTruePositives())
truePositives[-1].imageName = falsePositives[-1].imageName
truePositives[-1].imagePath = falsePositives[-1].imagePath
missingRecall.append(graphs[i].getMissingRecall())
missingRecall[-1].imageName = falsePositives[-1].imageName
missingRecall[-1].imagePath = falsePositives[-1].imagePath
if options.ignoreFile != None:
ignoredTruePositives.append(
graphs[i].getIgnoredTruePositives()
)
#saveIDL(anaPrefix + "-falsePositives.idl.gz", falsePositives);
falsePositives.save(anaPrefix + "-falsePositives.pal")
sortedFP = annoAnalyze(falsePositives)
#saveIDL(anaPrefix + "-falsePositives-sortedByScore.idl.gz", sortedFP);
#saveIDL(anaPrefix + "-truePositives.idl.gz", truePositives);
# saveIDL(anaPrefix + "-falsePositives-sortedByScore.idl", sortedFP);
# saveIDL(anaPrefix + "-truePositives.idl", truePositives);
sortedFP.save(anaPrefix + "-falsePositives-sortedByScore.pal")
truePositives.save(anaPrefix + "-truePositives.pal")
sortedFP = annoAnalyze(truePositives)
#saveIDL(anaPrefix + "-truePositives-sortedByScore.idl.gz", sortedFP);
#saveIDL(anaPrefix + "-truePositives-sortedByScore.idl", sortedFP);
sortedFP.save(anaPrefix + "-truePositives-sortedByScore.pal")
if options.ignoreFile != None:
#saveIDL(anaPrefix + "-ignoredTruePositives.idl.gz", ignoredTruePositives)
#saveIDL(anaPrefix + "-ignoredTruePositives.idl", ignoredTruePositives)
ignoredTruePositives.save(anaPrefix + "-ignoredTruePositives.pal")
#saveIDL(anaPrefix + "-missingRecall.idl.gz", missingRecall);
#saveIDL(anaPrefix + "-missingRecall.idl", missingRecall);
missingRecall.save(anaPrefix + "-missingRecall.pal")
if __name__ == "__main__":
main()
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/annolist/doRPC.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from . import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
from tensorflow.contrib import slim
@slim.add_arg_scope
def bottleneck(
inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None
):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=None,
scope='shortcut'
)
residual = slim.conv2d(
inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1'
)
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2'
)
residual = slim.conv2d(
residual,
depth, [1, 1],
stride=1,
activation_fn=None,
scope='conv3'
)
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, output
)
def resnet_v1(
inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None
):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether is training or not.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope(
[slim.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection
):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError(
'The output_stride needs to be a multiple of 4.'
)
output_stride /= 4
net = resnet_utils.conv2d_same(
net, 64, 7, stride=2, scope='conv1'
)
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(
net, blocks, output_stride
)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
net, [1, 2], name='pool5', keep_dims=True
)
if num_classes is not None:
net = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits'
)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection
)
if num_classes is not None:
end_points['predictions'] = slim.softmax(
net, scope='predictions'
)
return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_50(
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_50'
):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]
),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]
),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]
),
resnet_utils.Block('block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=True,
reuse=reuse,
scope=scope
)
def resnet_v1_101(
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_101'
):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]
),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]
),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]
),
resnet_utils.Block('block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=True,
reuse=reuse,
scope=scope
)
def resnet_v1_152(
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_152'
):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]
),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]
),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]
),
resnet_utils.Block('block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=True,
reuse=reuse,
scope=scope
)
def resnet_v1_200(
inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v1_200'
):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]
),
resnet_utils.Block(
'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]
),
resnet_utils.Block(
'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]
),
resnet_utils.Block('block4', bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1(
inputs,
blocks,
num_classes,
is_training,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=True,
reuse=reuse,
scope=scope
)
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/slim_nets/resnet_v1.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
from tensorboxresnet.utils import tf_concat
def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)
):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d], stride=1, padding='SAME'
):
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(
inputs, 64, [7, 7], stride=2, scope=end_point
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 64, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 96, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 128, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 16, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 32, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 32, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 128, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 128, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 192, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 32, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 96, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 192, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 96, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 208, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 16, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 48, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 160, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 112, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 224, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 24, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 128, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 128, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 256, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 24, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 112, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 144, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 288, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 32, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 64, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 64, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 256, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 160, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 320, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 32, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 256, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 160, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 320, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 32, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0a_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 384, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 192, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 384, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, 48, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 128, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(
net, [3, 3], scope='MaxPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1'
)
net = tf_concat(
3, [branch_0, branch_1, branch_2, branch_3]
)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(
inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1'
):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV1', [inputs, num_classes], reuse=reuse
) as scope:
with slim.arg_scope(
[slim.batch_norm, slim.dropout], is_training=is_training
):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(
net, [7, 7], stride=1, scope='MaxPool_0a_7x7'
)
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_0c_1x1'
)
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(
logits, scope='Predictions'
)
return logits, end_points
inception_v1.default_image_size = 224
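# Minimal usage sketch (assumes `images` is a [batch, 224, 224, 3] float
# tensor; the names below are illustrative, not part of this module):
#
#   with slim.arg_scope(inception_v1_arg_scope()):
#       logits, end_points = inception_v1(images, num_classes=1000,
#                                         is_training=False)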
def inception_v1_arg_scope(
weight_decay=0.00004,
use_batch_norm=True,
batch_norm_var_collection='moving_vars'
):
"""Defines the default InceptionV1 arg scope.
    Note: Although the original paper didn't use batch_norm we found it useful.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: "If `True`, batch_norm is applied after each convolution.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
    An `arg_scope` to use for the inception v1 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections':
{
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)
):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params
) as sc:
return sc
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/slim_nets/inception_v1.py |
deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/slim_nets/__init__.py |
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and
returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
"""
def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
output: A `Tensor` of size [batch, height_out, width_out, channels] with the
input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
else:
return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
"""Strided 2-D convolution with 'SAME' padding.
When stride > 1, then we do explicit zero-padding, followed by conv2d with
'VALID' padding.
Note that
net = conv2d_same(inputs, num_outputs, 3, stride=stride)
is equivalent to
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=1,
padding='SAME')
net = subsample(net, factor=stride)
whereas
net = tf.contrib.layers.conv2d(inputs, num_outputs, 3, stride=stride,
padding='SAME')
is different when the input's height or width is even, which is why we add the
current function. For more details, see ResnetUtilsTest.testConv2DSameEven().
Args:
inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
num_outputs: An integer, the number of output filters.
kernel_size: An int with the kernel_size of the filters.
stride: An integer, the output stride.
rate: An integer, rate for atrous convolution.
scope: Scope.
Returns:
output: A 4-D tensor of size [batch, height_out, width_out, channels] with
the convolution output.
"""
if stride == 1:
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding='SAME',
scope=scope
)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = array_ops.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
)
return layers_lib.conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding='VALID',
scope=scope
)
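# Minimal sketch (not in the original module) of the equivalence stated in the
# docstring above, assuming `inputs` is a 4-D float tensor:
#
#   net_a = conv2d_same(inputs, num_outputs=64, kernel_size=3, stride=2)
#   # ...produces the same values as:
#   net_b = layers_lib.conv2d(inputs, 64, 3, stride=1, padding='SAME')
#   net_b = subsample(net_b, factor=2)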
@add_arg_scope
def stack_blocks_dense(
net, blocks, output_stride=None, outputs_collections=None
):
"""Stacks ResNet `Blocks` and controls output feature density.
First, this function creates scopes for the ResNet in the form of
'block_name/unit_1', 'block_name/unit_2', etc.
Second, this function allows the user to explicitly control the ResNet
output_stride, which is the ratio of the input to output spatial resolution.
This is useful for dense prediction tasks such as semantic segmentation or
object detection.
Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
half the nominal network stride (e.g., output_stride=4), then we compute
responses twice.
Control of the output feature density is implemented by atrous convolution.
Args:
net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
network stride. If output_stride is not `None`, it specifies the requested
ratio of input to output spatial resolution, which needs to be equal to
the product of unit strides from the start up to some level of the ResNet.
For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
then valid values for the output_stride are 1, 2, 6, 24 or None (which
is equivalent to output_stride=24).
outputs_collections: Collection to add the ResNet block outputs.
Returns:
net: Output tensor with stride equal to the specified output_stride.
Raises:
ValueError: If the target output_stride is not valid.
"""
# The current_stride variable keeps track of the effective stride of the
# activations. This allows us to invoke atrous convolution whenever applying
# the next residual unit would result in the activations having stride larger
# than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
for i, unit in enumerate(block.args):
if output_stride is not None and current_stride > output_stride:
raise ValueError(
'The target output_stride cannot be reached.'
)
with variable_scope.variable_scope(
'unit_%d' % (i + 1), values=[net]
):
unit_depth, unit_depth_bottleneck, unit_stride = unit
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(
net,
depth=unit_depth,
depth_bottleneck=unit_depth_bottleneck,
stride=1,
rate=rate
)
rate *= unit_stride
else:
net = block.unit_fn(
net,
depth=unit_depth,
depth_bottleneck=unit_depth_bottleneck,
stride=unit_stride,
rate=1
)
current_stride *= unit_stride
net = utils.collect_named_outputs(
outputs_collections, sc.name, net
)
if output_stride is not None and current_stride != output_stride:
raise ValueError('The target output_stride cannot be reached.')
return net
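# Minimal sketch (not in the original module), assuming `bottleneck` from the
# accompanying resnet_v1.py and the `Block` format shown above. The block's
# unit strides are 1, 1, 2, so requesting output_stride=1 keeps full spatial
# resolution by running the strided unit atrously:
#
#   block = Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)])
#   net = stack_blocks_dense(net, [block], output_stride=1)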
def resnet_arg_scope(
is_training=True,
weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True
):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
is_training: Whether or not we are training the parameters in the batch
normalization layers of the model.
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'is_training': is_training,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': ops.GraphKeys.UPDATE_OPS,
}
with arg_scope(
[layers_lib.conv2d],
weights_regularizer=regularizers.l2_regularizer(weight_decay),
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers.batch_norm,
normalizer_params=batch_norm_params
):
with arg_scope([layers.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
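# Hedged usage sketch (assumed, not part of this file): the scope is typically
# applied around full network construction, e.g. with a resnet_v1 builder from
# the sibling module:
#
#   with arg_scope(resnet_arg_scope(is_training=False)):
#       net, end_points = resnet_v1_50(inputs, num_classes=1000)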
| deepfigures-open-master | vendor/tensorboxresnet/tensorboxresnet/utils/slim_nets/resnet_utils.py |
from setuptools import setup, find_packages
setup(
name="allennlp_beaker",
version="0.0.1",
description=(
"An interactive AllenNLP plugin for submitting training jobs to beaker"
),
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP beaker",
url="https://github.com/allenai/allennlp-beaker",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"],),
install_requires=["allennlp", "click", "PyYAML", "click-spinner"],
entry_points={"console_scripts": ["allennlp-beaker=allennlp_beaker.__main__:run"]},
python_requires=">=3.6.1",
)
| allennlp-beaker-master | setup.py |
allennlp-beaker-master | allennlp_beaker/__init__.py |
|
from collections import deque
from datetime import date
import os
import shutil
import subprocess
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Iterable, Optional, Tuple
import uuid
from allennlp.common.file_utils import cached_path
from allennlp.common.params import Params
import click
import click_spinner
import yaml
DEFAULT_CLUSTER = "ai2/on-prem-ai2-server2"
DOCKERFILE = """
FROM python:3.7
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
# Tell nvidia-docker the driver spec that we need as well as to
# use all available devices, which are mounted at /usr/local/nvidia.
# The LABEL supports an older version of nvidia-docker, the env
# variables a newer one.
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
LABEL com.nvidia.volumes.needed="nvidia_driver"
WORKDIR /stage/allennlp
ENTRYPOINT ["allennlp"]
ARG ALLENNLP
RUN pip install --no-cache-dir ${ALLENNLP}
COPY . .
"""
DOCKERFILE_EXTRA_STEPS = """
# Ensure allennlp isn't re-installed when we install allennlp-models.
ENV ALLENNLP_VERSION_OVERRIDE allennlp
# To be compatible with older versions of allennlp-models.
ENV IGNORE_ALLENNLP_IN_SETUP true
# Disable parallelism in tokenizers because it doesn't help, and sometimes hurts.
ENV TOKENIZERS_PARALLELISM 0
ARG PACKAGES
RUN pip install --no-cache-dir ${PACKAGES}
"""
def echo_command_output(cmd: List[str]) -> None:
for line in shell_out_command(cmd):
click.echo(line, nl=True)
def shell_out_command(cmd: List[str]) -> Iterable[str]:
try:
child = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
check=True,
)
for line in child.stdout.split("\n"):
line = line.rstrip()
if line.strip():
yield line
except subprocess.CalledProcessError as exc:
raise click.ClickException(click.style(exc.output, fg="red"))
except FileNotFoundError as exc:
raise click.ClickException(click.style(f"{exc.filename} not found", fg="red"))
def create_beaker_config(
name: str = None,
description: str = None,
image: str = None,
gpus: int = 0,
cluster: str = DEFAULT_CLUSTER,
) -> Dict[str, Any]:
return {
"description": description,
"tasks": [
{
"name": name,
"spec": {
"image": image,
"resultPath": "/output",
"args": [
"train",
"config.jsonnet",
"-s",
"/output",
"--file-friendly-logging",
],
"requirements": {"gpuCount": gpus},
},
"cluster": cluster,
}
],
}
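# Illustrative sketch (not in the original source) of the config this function
# produces for a hypothetical single-GPU experiment:
#
#   create_beaker_config(name="my-exp", image="me/my-image", gpus=1)
#   # -> {"description": None,
#   #     "tasks": [{"name": "my-exp",
#   #                "spec": {"image": "me/my-image", "resultPath": "/output",
#   #                         "args": [...], "requirements": {"gpuCount": 1}},
#   #                "cluster": DEFAULT_CLUSTER}]}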
def parse_version(ctx, param, version) -> str:
if not version:
return version
if param.name == "allennlp_version":
package = "allennlp"
else:
package = "allennlp-models"
if version.startswith("git@"):
git_url = f"https://github.com/allenai/{package}"
if version == "git@master":
# Get the latest commit from the git repo.
click.secho("Checking for latest commit...", fg="yellow")
with click_spinner.spinner():
latest_commits = list(
shell_out_command(["git", "ls-remote", git_url + ".git"])
)
latest = latest_commits[0].split("\t")[0]
version = f"git+{git_url}.git@{latest}"
else:
version = f"git+{git_url}.{version}"
elif version.startswith("git+"):
pass
else:
version = f"{package}=={version}"
click.echo("Using " + click.style(f"{version}", fg="green"))
return version
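# Hedged examples (inputs are illustrative) of how parse_version normalizes
# versions for the allennlp package, ignoring the click context arguments:
#
#   "1.0.0"          -> "allennlp==1.0.0"
#   "git@my-branch"  -> "git+https://github.com/allenai/allennlp.git@my-branch"
#   "git@master"     -> "git+https://github.com/allenai/allennlp.git@<latest commit SHA>"
#   "git+https://…"  -> passed through unchanged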
def check_for_beaker():
# Print beaker version for debugging. If beaker is not installed, this will
    # exit with an error and notify the user.
echo_command_output(["beaker", "--version"])
_DEFAULT_EXPERIMENT_NAME: Optional[str] = None
def setup(ctx, param, config_path):
check_for_beaker()
path = cached_path(config_path)
    # If this is a local json/jsonnet file, we'll use the file basename as
    # the default name of the experiment.
global _DEFAULT_EXPERIMENT_NAME
if path.endswith(".json") or path.endswith(".jsonnet"):
_DEFAULT_EXPERIMENT_NAME = os.path.splitext(os.path.basename(path))[
0
] + date.today().strftime("_%Y%m%d")
return path
def parse_gpus(ctx, param, value):
if value is None:
params_file = ctx.params["config"]
gpus: int = 0
params = Params.from_file(params_file).as_dict()
if "distributed" in params:
cuda_devices = params["distributed"].get("cuda_devices")
if cuda_devices:
gpus = len([d for d in cuda_devices if d >= 0])
else:
cuda_device = params.get("trainer", {}).get("cuda_device")
if isinstance(cuda_device, int) and cuda_device >= 0:
gpus = 1
value = gpus
click.echo("Config specifies " + click.style(f"{value}", fg="green") + " gpus")
elif not isinstance(value, int):
value = int(value)
return value
def validate_includes(ctx, param, value):
if value:
for path, _ in value:
if not os.path.exists(path):
                raise click.BadParameter(f"path {path} doesn't exist")
return value
@click.command()
@click.argument(
"config",
callback=setup,
)
@click.option(
"--name",
prompt="What do you want to call your experiment?",
default=lambda: _DEFAULT_EXPERIMENT_NAME,
help="The name to give the experiment on beaker.",
)
@click.option(
"--allennlp-version",
prompt="What version of AllenNLP do you want to use?",
default="git@master",
show_default=True,
help="The PyPI version, branch, or commit SHA of AlleNLP to use. "
"Git branches and commits should be prefixed with 'git@'. For example, "
"'git@master' or '1.0.0rc5'.",
callback=parse_version,
)
@click.option(
"--models-version",
prompt="What version (if any) of AllenNLP Models do you want to use?",
default="",
help="The PyPI version, branch, or commit SHA of AllenNLP Models to use, if any. "
"Git branches and commits should be prefixed with 'git@'.",
callback=parse_version,
)
@click.option(
"--packages",
prompt="What other Python packages does your experiment need?",
help="Additional Python packages to install in the docker image. "
"The value of this argument will be passed directly to `pip install`.",
default="",
)
@click.option(
"--gpus",
default=None,
show_default="parsed from training config",
callback=parse_gpus,
type=click.INT,
help="The number of GPUs to reserve for your experiment. If not specified "
"the GPUs will be guessed from the training config.",
)
@click.option(
"--workspace",
default=os.environ.get("BEAKER_DEFAULT_WORKSPACE", ""),
show_default="$BEAKER_DEFAULT_WORKSPACE",
prompt="Which beaker workspace do you want to use?",
help="The beaker workspace to submit the experiment to.",
)
@click.option(
"--user",
default=os.environ.get("BEAKER_DEFAULT_USER", ""),
show_default="$BEAKER_DEFAULT_USER",
prompt="What is your beaker username?",
help="The beaker username to submit the experiment under.",
)
@click.option(
"--include",
type=(str, str),
multiple=True,
prompt="Do you want to include any other files or directories?",
help="A list of files or directories to include.",
callback=validate_includes,
)
@click.option("-v", "--verbose", count=True)
@click.option("--dry-run", is_flag=True)
@click.option(
"--cluster", type=str, default=DEFAULT_CLUSTER, help="The beaker cluster to use."
)
def run(
config: str,
name: str,
allennlp_version: str,
models_version: str,
packages: str,
gpus: int,
workspace: str,
user: str,
include: Tuple[Tuple[str, str], ...],
verbose: int,
dry_run: bool,
cluster: str,
):
# We create a temp directory to use as context for the Docker build, and
# also to create a temporary beaker config file.
with TemporaryDirectory() as context_dir:
# Write the training config to the context directory.
training_config_path = os.path.join(context_dir, "config.jsonnet")
params = Params.from_file(config)
params.to_file(training_config_path)
# Create a unique tag to use.
image_id = str(uuid.uuid4())
local_image_name = f"allennlp-beaker-{name}:{image_id}"
beaker_image_name = f"allennlp-beaker-{name}-{image_id}"
if models_version:
packages = models_version + " " + packages
packages = packages.strip()
        # Write the Dockerfile to the context directory.
dockerfile_path = os.path.join(context_dir, "Dockerfile")
with open(dockerfile_path, "w") as dockerfile:
dockerfile.write(DOCKERFILE)
if packages:
dockerfile.write(DOCKERFILE_EXTRA_STEPS)
# Write the beaker config to the context directory.
beaker_config_path = os.path.join(context_dir, "config.yml")
with open(beaker_config_path, "w") as beaker_config:
beaker_config.write(
yaml.dump(
create_beaker_config(
name=name,
image=user + "/" + beaker_image_name,
gpus=gpus,
description=f"{allennlp_version} {packages}",
cluster=cluster,
)
)
)
if verbose:
click.echo("Beaker config:")
for line in shell_out_command(["cat", beaker_config_path]):
print(line)
# Copy any other include files.
if include:
for (path, dest) in include:
dest = os.path.join(context_dir, dest)
click.echo(f"Copying {path} to {dest}")
if os.path.isdir(path):
shutil.copytree(path, dest)
else:
shutil.copy(path, dest)
# Build the Docker image.
click.echo(
"Building docker image with name "
+ click.style(local_image_name, fg="green")
+ "..."
)
build_args = [
"docker",
"build",
"--build-arg",
f"ALLENNLP={allennlp_version}",
]
if packages:
build_args.extend(["--build-arg", f"PACKAGES={packages}"])
build_args.extend(["-t", local_image_name, context_dir])
if verbose:
for line in shell_out_command(build_args):
print(line)
else:
with click_spinner.spinner():
deque(shell_out_command(build_args), maxlen=0)
if dry_run:
click.echo("Run the following to check the Docker image:\n")
click.echo(
f" docker run --rm -it --entrypoint /bin/bash {local_image_name}"
)
return None
# Publish the image to beaker.
click.echo("Publishing image to beaker...")
with click_spinner.spinner():
deque(
shell_out_command(
[
"beaker",
"image",
"create",
"-n",
beaker_image_name,
local_image_name,
]
),
maxlen=0,
)
# Submit the experiment to beaker.
click.echo("Submitting experiment to beaker...")
cmds = [
"beaker",
"experiment",
"create",
"--name",
name,
"-f",
beaker_config_path,
]
if workspace:
cmds.extend(["--workspace", workspace])
echo_command_output(cmds)
if __name__ == "__main__":
run()
| allennlp-beaker-master | allennlp_beaker/__main__.py |
allennlp-beaker-master | tests/__init__.py |
|
def test_hello():
print("Hello!")
| allennlp-beaker-master | tests/test_hello.py |
from setuptools import setup, find_packages
def read_requirements(filename: str):
with open(filename) as requirements_file:
import re
def fix_url_dependencies(req: str) -> str:
"""Pip and setuptools disagree about how URL dependencies should be handled."""
m = re.match(
r"^(git\+)?(https|ssh)://(git@)?github\.com/([\w-]+)/(?P<name>[\w-]+)\.git", req
)
if m is None:
return req
else:
return f"{m.group('name')} @ {req}"
requirements = []
for line in requirements_file:
line = line.strip()
if line.startswith("#") or len(line) <= 0:
continue
requirements.append(fix_url_dependencies(line))
return requirements
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import better_promptability whilst setting up.
VERSION = {} # type: ignore
with open("better_promptability/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
setup(
name="better_promptability",
version=VERSION["VERSION"],
description="",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="",
url="https://github.com/allenai/better-promptability",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"],
),
install_requires=read_requirements("requirements.txt"),
python_requires=">=3.7, <3.8", # restriction by promptsource
)
| better-promptability-main | setup.py |
_MAJOR = "0"
_MINOR = "1"
# On main and in a nightly release the patch should be one ahead of the last
# released build.
_PATCH = "0"
# This is mainly for nightly builds which have the suffix ".dev$DATE". See
# https://semver.org/#is-v123-a-semantic-version for the semantics.
_SUFFIX = ""
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}{3}".format(_MAJOR, _MINOR, _PATCH, _SUFFIX)
| better-promptability-main | better_promptability/version.py |
from tango import Step
@Step.register("check_install")
class CheckInstall(Step):
DETERMINISTIC = True
CACHEABLE = False
def run(self) -> None:
import torch
if torch.cuda.is_available():
print("All good! CUDA is available :)")
else:
print("All good! No CUDA though :/")
| better-promptability-main | better_promptability/check_install.py |
better-promptability-main | better_promptability/__init__.py |
|
"""
Changing T5Attention's forward to support prefix tuning, along with subclassing other classes that
use T5Attention. Changes from T5Attention's original forward are marked with
"# <CHANGE>" and "# </CHANGE>". It's possible that the added logic can be separated as some code
that entirely precedes the original forward, s.t. we can call super().forward() without code
duplication. Even better, we might be able to use a pre-hook so that most of this won't be needed.
"""
import torch
from torch import nn
from transformers.models.t5.modeling_t5 import (
T5Config,
T5Attention,
T5LayerSelfAttention,
T5LayerCrossAttention,
T5Block,
T5Stack,
T5ForConditionalGeneration,
)
class T5WithPrefixConfig(T5Config):
def __init__(
self, num_prefix=None, reparam=False, reparam_dim=512, no_decoder_self_attn=False, **kwargs
):
super().__init__(**kwargs)
self.num_prefix = num_prefix
self.reparam = reparam
self.reparam_dim = reparam_dim
self.no_decoder_self_attn = no_decoder_self_attn
@classmethod
def get_config_dict(cls, *args, **kwargs):
config_dict, kwargs = T5Config.get_config_dict(*args, **kwargs)
for field in ("num_prefix", "reparam_dim"):
assert field not in config_dict
if field in kwargs:
config_dict[field] = kwargs.pop(field)
return config_dict, kwargs
class T5AttentionWithPrefix(T5Attention):
def __init__(self, config: T5WithPrefixConfig, has_relative_attention_bias=False):
super().__init__(config, has_relative_attention_bias=has_relative_attention_bias)
self.num_prefix = config.num_prefix
# fmt: off
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). # noqa: E501
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), f"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past states" # noqa: E501
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
"""projection"""
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
"""reshape"""
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
"""projects hidden states correctly to key/query states"""
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# <CHANGE>
# move this up to not include layer-specific prefix
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
# prefix tuning
key_states = torch.cat([self.prefix_key, key_states], dim=2)
value_states = torch.cat([self.prefix_value, value_states], dim=2)
if mask is not None:
prefix_mask = torch.zeros(
batch_size, 1, mask.size(2), self.num_prefix, device=hidden_states.device
)
mask = torch.cat([prefix_mask, mask], dim=-1)
# </CHANGE>
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -hidden_states.size(1) :, :]
# <CHANGE>
position_bias = torch.cat(
[
torch.zeros(
position_bias.shape[:3] + (self.num_prefix,),
device=position_bias.device,
),
position_bias,
],
dim=3,
)
# </CHANGE>
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
# <CHANGE> moved one line up
# </CHANGE>
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
# fmt: on
class T5LayerSelfAttentionWithPrefix(T5LayerSelfAttention):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__(config, has_relative_attention_bias=has_relative_attention_bias)
if not config.no_decoder_self_attn:
self.SelfAttention = T5AttentionWithPrefix(
config, has_relative_attention_bias=has_relative_attention_bias
)
class T5LayerCrossAttentionWithPrefix(T5LayerCrossAttention):
def __init__(self, config):
super().__init__(config)
self.EncDecAttention = T5AttentionWithPrefix(config, has_relative_attention_bias=False)
class T5BlockWithPrefix(T5Block):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer[0] = T5LayerSelfAttentionWithPrefix(
config, has_relative_attention_bias=has_relative_attention_bias
)
if self.is_decoder:
self.layer[1] = T5LayerCrossAttentionWithPrefix(config)
class T5StackWithPrefix(T5Stack):
def __init__(self, config, embed_tokens=None):
super().__init__(config, embed_tokens=embed_tokens)
# prefix tuning - reparam 'trick'
self.input_tokens = torch.arange(self.config.num_prefix)
per_layer_dim = self.config.num_heads * self.config.d_kv
total_dim = self.config.num_layers * 2 * per_layer_dim
self.prefix_embed = (
nn.Sequential(
nn.Embedding(self.config.num_prefix, per_layer_dim),
nn.Linear(per_layer_dim, self.config.reparam_dim),
nn.Tanh(),
nn.Linear(self.config.reparam_dim, total_dim),
)
if self.config.reparam
else nn.Embedding(self.config.num_prefix, total_dim)
)
if self.is_decoder:
self.prefix_embed_cross = (
nn.Sequential(
nn.Embedding(self.config.num_prefix, per_layer_dim),
nn.Linear(per_layer_dim, self.config.reparam_dim),
nn.Tanh(),
nn.Linear(self.config.reparam_dim, total_dim),
)
if self.config.reparam
else nn.Embedding(self.config.num_prefix, total_dim)
)
self.block = torch.nn.ModuleList(
[
T5BlockWithPrefix(self.config, has_relative_attention_bias=bool(i == 0))
for i in range(self.config.num_layers)
]
)
# T5Stack has a self.init_weights() call here, but it's repetitive since we do it in
# T5ForConditionalGenerationWithPrefix anyway.
def generate_prefix_item(self, input_ids, embedding):
bsz = input_ids.size(0)
input_tokens = self.input_tokens.unsqueeze(0).expand(bsz, -1).to(input_ids.device)
prefix = embedding(input_tokens) # batch, seq, layer * embed * 2
prefix = prefix.view(
bsz,
self.config.num_prefix,
self.config.num_layers,
2,
self.config.num_heads,
self.config.d_kv,
)
prefix = prefix.permute([3, 2, 0, 4, 1, 5])
return prefix[0], prefix[1]
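    # Hedged shape note (derived from the view/permute above, not in the
    # original source): each returned item has shape
    # (num_layers, batch, num_heads, num_prefix, d_kv), i.e. a per-layer
    # prefix key or value for every attention head.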
def forward(self, input_ids=None, **kwargs):
prefix_key, prefix_value = self.generate_prefix_item(input_ids, self.prefix_embed)
prefix_key_cross = prefix_value_cross = [None] * len(prefix_key)
if self.is_decoder:
prefix_key_cross, prefix_value_cross = self.generate_prefix_item(
input_ids, self.prefix_embed_cross
)
for block, k, v, k_cross, v_cross in zip(
self.block, prefix_key, prefix_value, prefix_key_cross, prefix_value_cross
):
for layer in block.layer:
if isinstance(layer, T5LayerSelfAttentionWithPrefix):
layer.SelfAttention.prefix_key = k
layer.SelfAttention.prefix_value = v
if isinstance(layer, T5LayerCrossAttentionWithPrefix):
layer.EncDecAttention.prefix_key = k_cross
layer.EncDecAttention.prefix_value = v_cross
output = super().forward(input_ids=input_ids, **kwargs)
self.clean_up()
return output
def clean_up(self):
# For safety, in case other code uses it
for block in self.block:
for layer in block.layer:
if isinstance(layer, T5LayerSelfAttentionWithPrefix):
del layer.SelfAttention.prefix_key
del layer.SelfAttention.prefix_value
if isinstance(layer, T5LayerCrossAttentionWithPrefix):
del layer.EncDecAttention.prefix_key
del layer.EncDecAttention.prefix_value
class T5ForConditionalGenerationWithPrefix(T5ForConditionalGeneration):
def __init__(self, config):
super().__init__(config)
self.encoder = T5StackWithPrefix(self.encoder.config, self.shared)
self.decoder = T5StackWithPrefix(self.decoder.config, self.shared)
self.init_weights()
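# Hedged usage sketch (an assumption; the model name and prefix length are
# illustrative). `from_pretrained` routes extra kwargs through the overridden
# `get_config_dict` above, so `num_prefix` can be passed directly:
#
#   config = T5WithPrefixConfig.from_pretrained("t5-small", num_prefix=20)
#   model = T5ForConditionalGenerationWithPrefix.from_pretrained(
#       "t5-small", config=config
#   )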
| better-promptability-main | better_promptability/models/t5_with_prefix.py |
from __future__ import annotations
import logging
from typing import Any, Dict
from allennlp.training.metrics import Metric
from learn2learn.utils import clone_module
from tango.common.lazy import Lazy
import torch
import torch.distributed as dist
from tango.common.params import logger as tango_logger
from tango.integrations.torch.optim import Optimizer
from .model import Model
from .prefix_transformer import PrefixTransformer
from ..modules.with_prefix_embedding import logger as wpe_logger
from ..train.optim import resolve_optimizer_conf
logger = logging.getLogger(__name__)
def split_batch(batch, split_size):
bsz = batch["input_ids"].shape[0]
assert bsz % split_size == 0
assert all(v.shape[0] == bsz for v in batch.values())
splits = None
for k, v in batch.items():
v_splits = v.split(split_size)
if splits is None:
splits = [{} for _ in v_splits]
for i, v_split in enumerate(v_splits):
splits[i][k] = v_split
return splits
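# Minimal sketch (not in the original file) of split_batch, assuming every
# tensor shares a batch dimension of 4:
#
#   batch = {"input_ids": torch.zeros(4, 8), "target_ids": torch.zeros(4, 2)}
#   splits = split_batch(batch, split_size=2)
#   # -> two dicts, each holding tensors of shape (2, 8) and (2, 2)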
@Model.register("meta_learner")
class MetaLearner(Model):
def __init__(
self,
model: PrefixTransformer,
adaptation_steps: int,
algorithm: str,
meta_optimizer: Lazy[Optimizer],
different_inner_loop_batches: bool = False,
meta_accumulate_grad_batches: int = 1,
reuse_inner_opt_state: bool = True,
):
# TODO: anneal meta LR?
assert algorithm in {"fomaml", "reptile"}
super().__init__(model.config, model.dataset, optimizer=meta_optimizer, epochs=model.epochs)
self.model = model
self.algorithm = algorithm
self.adaptation_steps = adaptation_steps
self.different_inner_loop_batches = different_inner_loop_batches
self.meta_accumulate_grad_batches = meta_accumulate_grad_batches
self.reuse_inner_opt_state = reuse_inner_opt_state
inner_optimizer = resolve_optimizer_conf(self.model.configure_optimizers())
if self.reuse_inner_opt_state:
self.inner_optimizer_state = inner_optimizer.state_dict()
self.model.metrics = self.model.setup_metrics()
self.metrics = self.model.metrics
# ShardedDataParallel uses .requires_grad for sharding, and yet we use this property in
# quite complicated ways for meta learning. We need to make sure that this property
        # correctly reflects the learnability of each parameter after initialization. We restore
# it for our purposes in the first forward pass.
self.orig_requires_grad = self.model.unfreeze()
self.restored_requires_grad = False
def setup(self, stage: str = None):
pass
def setup_metrics(self) -> Dict[str, Dict[str, Metric]]:
return {}
def compute_loss(self, *args, **kwargs):
raise NotImplementedError
def get_predictions(self, *args, **kwargs):
raise NotImplementedError
def forward(self, meta_batch: list[tuple[dict, dict]]) -> dict[str, torch.Tensor]:
if not self.restored_requires_grad:
for p in self.model.parameters():
p.requires_grad = self.orig_requires_grad[p]
self.restored_requires_grad = True
for p in self.model.parameters():
p.grad = torch.zeros_like(p.data)
# These are for logging only
support_loss = 0.0
query_loss = torch.zeros([]) # a dummy query loss for reptile
for support_batch, query_batch in meta_batch:
            # Disable logging below ERROR level during model recreation, which would otherwise pollute stdout.
# TODO: this is ugly, but I don't know how to globally change logging level. A better
# solution may be something like warn_once.
wpe_logger_level = wpe_logger.level
wpe_logger.setLevel(logging.ERROR)
tango_logger_level = tango_logger.level
tango_logger.setLevel(logging.ERROR)
learner = clone_module(self.model)
detach_module(learner, keep_requires_grad=True)
learner.train() # for meta-evaluation, though we don't have it right now
inner_optimizer = resolve_optimizer_conf(
learner.configure_optimizers()
)
if self.reuse_inner_opt_state:
inner_optimizer.load_state_dict(self.inner_optimizer_state)
wpe_logger.setLevel(wpe_logger_level)
tango_logger.setLevel(tango_logger_level)
support_batch_size = support_batch["input_ids"].shape[0]
if self.different_inner_loop_batches:
support_batch_size = support_batch_size // self.adaptation_steps
support_batches = split_batch(support_batch, support_batch_size)
support_split_size = support_batch_size // self.meta_accumulate_grad_batches
query_split_size = (
query_batch["input_ids"].shape[0] // self.meta_accumulate_grad_batches
)
for i, adaptation_step in enumerate(range(self.adaptation_steps)):
inner_optimizer.zero_grad()
curr_support_batch = (
support_batches[i] if self.different_inner_loop_batches else support_batch
)
for support_batch_split in split_batch(curr_support_batch, support_split_size):
output = learner(support_batch_split)
loss = self.model.compute_loss(
output["logits"],
support_batch_split["target_ids"],
support_batch_split.get("target_mask"),
)
# Don't worry, this backward doesn't trigger unwanted gradient sync in
# distributed training, because self.model is a torch module, not a
# distributed wrapper.
loss.backward()
if adaptation_step == self.adaptation_steps - 1:
support_loss += loss.detach().cpu()
inner_optimizer.step()
# In the inner loop we only tune the prompt embeddings, and in the outer loop we
# unfreeze the model to tune it in its entirety.
learner.unfreeze()
inner_optimizer.zero_grad()
for query_batch_split in split_batch(query_batch, query_split_size):
query_output = learner(query_batch_split)
loss = self.model.compute_loss(
query_output["logits"],
query_batch_split["target_ids"],
query_batch_split.get("target_mask"),
)
loss.backward()
query_loss += loss.detach().cpu()
if self.algorithm == "fomaml":
for p, l in zip(self.model.parameters(), learner.parameters()):
p.grad.data.add_(l.grad.data)
elif self.algorithm == "reptile":
inner_optimizer.step()
for p, l in zip(self.model.parameters(), learner.parameters()):
                    p.grad.data.add_(l.data, alpha=-1.0)
else:
assert False
if self.reuse_inner_opt_state:
self.inner_optimizer_state = inner_optimizer.state_dict()
for p in self.model.parameters():
# In distributed training, these averages are in most cases exact. The only exception
# is at the end of an epoch where different GPUs might have different-sized data.
# But since that happens VERY infrequently, we can live with this rather than
            # implementing custom ddp comm hooks.
if self.algorithm == "fomaml":
p.grad.data.div_(len(meta_batch))
elif self.algorithm == "reptile":
p.grad.data.div_(len(meta_batch)).add_(p.data)
support_loss /= len(meta_batch)
query_loss /= len(meta_batch)
if dist.is_initialized():
# Gradient sync is normally performed in backward(), but we don't call backward for meta
# learning since we modify .grad directly. So we need to manually sync gradients.
# self.trainer.model is the distributed wrapper.
self.trainer.model.reduce()
# reduce uses SUM, but we want averages
for p in self.model.parameters():
if p.grad is not None: # in sharded ddp, each worker only gets some gradients
p.grad.data.div_(dist.get_world_size())
return {"support_loss": support_loss, "query_loss": query_loss}
def backward(self, *args, **kwargs):
# Gradients are manually populated
pass
def optimizer_zero_grad(self, *args, **kwargs):
# Gradients are manually populated, and we don't want them to be zeroed
pass
def training_step(self, batch: dict[str, torch.Tensor], batch_idx: int) -> dict[str, Any]:
output = self(batch)
for k, v in output.items():
self.log(k, v)
return {"loss": output["query_loss"]}
def eval_step(
self, batch: dict[str, torch.Tensor], batch_idx: int, dataloader_idx=0, compute_loss=True
) -> dict[str, Any]:
return self.model.eval_step(batch, batch_idx, dataloader_idx, compute_loss)
def on_save_checkpoint(self, checkpoint: dict[str, Any]):
"""
PyTorch's native optimizer state checkpoint logic is very fragile, so we also do it on our
own. See https://github.com/pytorch/pytorch/issues/1489
"""
optimizer_states = self.optimizers(use_pl_optimizer=False).state
param_to_name = {p: n for n, p in self.named_parameters()}
states = {param_to_name[p]: states for p, states in optimizer_states.items()}
checkpoint["custom_optimizer_states"] = states
def detach_module(module, keep_requires_grad=False):
"""
Adapted from learn2learn.utils.detach_module to add the `keep_requires_grad` flag.
This will no longer be necessary once https://github.com/learnables/learn2learn/pull/294 is
merged.
"""
if not isinstance(module, torch.nn.Module):
return
# First, re-write all parameters
for param_key in module._parameters:
if module._parameters[param_key] is not None:
requires_grad = module._parameters[param_key].requires_grad
detached = module._parameters[param_key].detach_() # noqa: F841; consistency w/ orig.
if keep_requires_grad and requires_grad:
module._parameters[param_key].requires_grad_()
# Second, handle the buffers if necessary
for buffer_key in module._buffers:
if module._buffers[buffer_key] is not None and module._buffers[buffer_key].requires_grad:
module._buffers[buffer_key] = module._buffers[buffer_key].detach_()
if keep_requires_grad: # requires_grad checked above
module._buffers[buffer_key].requires_grad_()
# Then, recurse for each submodule
for module_key in module._modules:
detach_module(module._modules[module_key], keep_requires_grad=keep_requires_grad)
| better-promptability-main | better_promptability/models/meta_learner.py |
better-promptability-main | better_promptability/models/__init__.py |
|
from __future__ import annotations
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union
from allennlp.training.metrics import Metric
import torch
import torch.nn.functional as F
from tango.common.lazy import Lazy
from tango.integrations.pytorch_lightning.model import LightningModule
from tango.integrations.torch.optim import Optimizer
from ..data.config import Config
from ..data.data_module import DataModule
class Model(LightningModule):
def __init__(
self,
config: Config,
dataset: DataModule,
optimizer: Optional[Lazy[Optimizer]] = None,
epochs: int = 3,
weight_decay: float = 0.0,
accumulate_grad_batches: int = 1,
warmup_steps: int = 0,
):
super().__init__()
self.config = config
self.dataset = dataset
self._optimizer = optimizer
if self._optimizer is not None:
assert isinstance(self._optimizer, Lazy)
self.epochs = epochs
self.optimizer_kwargs = {
"weight_decay": weight_decay,
"accumulate_grad_batches": accumulate_grad_batches,
"warmup_steps": warmup_steps,
}
self.metrics = self.setup_metrics()
def setup(self, stage: str = None):
"""To set up self.dataset_size"""
if stage != "fit":
return
self.dataset_size = len(self.dataset.dataset_dict[self.dataset.train_split])
def setup_metrics(self) -> Dict[str, Dict[str, Metric]]:
return {
split: {
name: self.dataset.instantiate_metric(name, split)
for name in self.dataset.metric_names
}
for split in self.dataset.dev_splits + self.dataset.test_splits
}
def configure_optimizers(self) -> Union[List[Optimizer], Tuple[List[Optimizer], List[Dict]]]:
"""Prepare optimizer and schedule (linear warmup and decay)"""
assert self._optimizer is not None
no_decay = ["bias", "LayerNorm.weight", "layernorm.weight", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)
],
"weight_decay": self.optimizer_kwargs["weight_decay"],
},
{
"params": [
p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = self._optimizer.construct(params=optimizer_grouped_parameters) # type: ignore
return [optimizer]
def optimizer_zero_grad(
self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int
):
"""See https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"""
optimizer.zero_grad()
def compute_loss(
self,
logits: torch.Tensor,
labels: torch.Tensor,
mask: Optional[torch.Tensor] = None,
reduce=True,
) -> torch.Tensor:
assert mask is not None
loss = F.cross_entropy(logits.view(-1, logits.shape[-1]), labels.view(-1), reduction="none")
loss = loss.view_as(labels) * mask
if reduce:
assert mask.any(dim=-1).all()
loss = loss.sum() / mask.sum() # type: ignore
return loss
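    # Minimal sketch (not part of the original file): with logits of shape
    # (bsz, seq_len, vocab), labels of shape (bsz, seq_len), and a 0/1 mask of
    # the same shape, the reduced loss is the mean cross-entropy over unmasked
    # positions:
    #
    #   loss = self.compute_loss(logits, labels, mask)  # scalar tensor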
def training_step(self, batch: dict[str, torch.Tensor], batch_idx: int) -> dict[str, Any]:
loss = self.compute_loss(
self(batch)["logits"], batch["target_ids"], batch.get("target_mask")
)
self.log("train_loss", loss)
return {"loss": loss}
def get_predictions(self, logits: torch.Tensor, batch: dict[str, torch.Tensor]) -> torch.Tensor:
return logits.argmax(dim=-1)
def eval_step(
self,
batch: dict[str, torch.Tensor],
batch_idx: int,
dataloader_idx=0,
compute_loss=True,
) -> dict[str, Any]:
logits = self(batch)["logits"]
preds = self.get_predictions(logits, batch).masked_fill(
~batch["is_correct_mask"], torch.finfo(logits.dtype).min
)
targets = batch["target_ids"] # target sequences.
if "is_correct" in batch:
labels = (batch["is_correct"] & batch["is_correct_mask"]).byte().argmax(dim=-1)
split = self.dataset.dev_splits[dataloader_idx]
for metric in self.metrics[split].values():
metric(*metric.detach_tensors(preds, labels))
return (
{"loss": self.compute_loss(logits, targets, batch.get("targets_mask")).detach().cpu()}
if compute_loss
else {}
)
def eval_epoch_end(self, outputs: Union[list[list[dict[str, Any]]], list[dict[str, Any]]]):
# pytorch-lightning "conveniently" unwraps the list when there's only one dataloader,
# so we need a check here.
num_splits = 1 if isinstance(outputs[0], dict) else len(outputs)
# We gather individual metrics from each dataloader and compute the average if there is
# more than one
if num_splits > 1:
sums: defaultdict = defaultdict(int)
for i in range(num_splits):
split = self.dataset.dev_splits[i]
assert split != "avg" # reserved keyword for below
metrics = self.get_metrics(split, reset=True)
for k, v in metrics.items():
if num_splits > 1:
self.log(f"{k}_{split}", v)
sums[k] += v
else:
self.log(k, v)
if num_splits > 1:
for k, v in sums.items():
self.log(f"{k}_avg", v / num_splits)
def get_metrics(self, split: str, reset=False) -> dict[str, Any]:
metrics = {name: metric.get_metric() for name, metric in self.metrics[split].items()}
if reset:
for metric in self.metrics[split].values():
metric.reset()
return metrics
def validation_step(
self, batch: dict[str, torch.Tensor], batch_idx: int, dataloader_idx=0
) -> dict[str, Any]:
return self.eval_step(batch, batch_idx, dataloader_idx=dataloader_idx)
def validation_epoch_end(self, outputs: list[dict[str, Any]]):
return self.eval_epoch_end(outputs)
def test_step(
self, batch: dict[str, torch.Tensor], batch_idx: int, dataloader_idx=0
) -> dict[str, Any]:
return self.eval_step(batch, batch_idx, dataloader_idx=dataloader_idx)
def test_epoch_end(self, outputs: list[dict[str, Any]]):
return self.eval_epoch_end(outputs)
| better-promptability-main | better_promptability/models/model.py |
from __future__ import annotations
import logging
from typing import Any, Callable, IO, Optional, Union, Dict
import torch
from tango.common.lazy import Lazy
from tango.integrations.torch.optim import Optimizer
from transformers import T5ForConditionalGeneration
from ..data.config import Config
from ..data.prompt_data_module import PromptDataModule
from ..data.t0_multitask_data_module import T0MultiTaskDataModule
from ..modules.transformer import Transformer
from ..modules.with_prefix_embedding import WithPrefixEmbedding
from .model import Model
from .t5_with_prefix import T5WithPrefixConfig, T5ForConditionalGenerationWithPrefix
logger = logging.getLogger(__name__)
@Model.register("prefix_transformer")
@Model.register("prefix_transformer_from_checkpoint", constructor="load_from_checkpoint")
class PrefixTransformer(Model):
def __init__(
self,
config: Config,
dataset: PromptDataModule,
transformer_model: str,
optimizer: Optional[Lazy[Optimizer]] = None,
epochs: int = 3,
weight_decay: float = 0.0,
accumulate_grad_batches: int = 1,
warmup_steps: int = 0,
train_full_model: bool = False,
**transformer_kwargs,
):
self.transformer_name = transformer_model
self.train_full_model = train_full_model
self.deep = dataset.deep
super().__init__(
config,
dataset,
optimizer=optimizer,
epochs=epochs,
weight_decay=weight_decay,
accumulate_grad_batches=accumulate_grad_batches,
warmup_steps=warmup_steps,
)
if not self.deep:
self.transformer = Transformer(transformer_model, "seq2seq-lm", **transformer_kwargs)
else:
self.transformer = Transformer(
transformer_model,
"seq2seq-lm",
config_cls=T5WithPrefixConfig,
model_cls=T5ForConditionalGenerationWithPrefix,
num_prefix=dataset.num_prefix,
**transformer_kwargs,
)
transformer_model: T5ForConditionalGeneration = self.transformer.model
assert isinstance(transformer_model, T5ForConditionalGeneration)
if not self.train_full_model:
for n, param in self.transformer.named_parameters():
if n.startswith("model.encoder.prefix_") or n.startswith("model.decoder.prefix_"):
assert self.deep
else:
param.requires_grad = False
if not self.deep:
transformer_model.set_input_embeddings(
WithPrefixEmbedding(
transformer_model.shared,
self.dataset.tokenizer.vocab_size,
self.dataset.num_prefix,
)
)
def unfreeze(self) -> dict[torch.nn.Parameter, bool]:
orig_requires_grad = {}
for param in self.transformer.parameters():
orig_requires_grad[param] = param.requires_grad
param.requires_grad = True
return orig_requires_grad
def forward(self, batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
input_ids = batch["input_ids"]
input_mask = batch["input_mask"]
target_ids = batch["target_ids"]
target_mask = batch["target_mask"]
return_dict = {}
assert input_ids.shape == input_mask.shape and input_ids.dim() in (2, 3)
if not self.training: # for inference we have an additional dimension for classes
orig_shape = input_ids.shape # bs x num_classes x seq_len
input_ids = input_ids.reshape(-1, orig_shape[-1])
input_mask = input_mask.reshape(-1, orig_shape[-1])
orig_decoder_shape = target_ids.shape
target_ids = target_ids.reshape(-1, orig_decoder_shape[-1])
target_mask = target_mask.reshape(-1, orig_decoder_shape[-1])
logits = self.transformer(
input_ids=input_ids,
attention_mask=input_mask,
labels=target_ids,
decoder_attention_mask=target_mask,
).logits
if not self.training:
logits = logits.reshape(*(orig_decoder_shape + (-1,)))
return_dict["logits"] = logits
return return_dict
def get_predictions(self, logits: torch.Tensor, batch: dict[str, torch.Tensor]) -> torch.Tensor:
"""
Input:
logits: (bsz, num_classes, seq_len, vocab_size)
Output:
scores: (bsz, num_classes)
"""
mask = batch["target_mask"] # (bsz, num_classes, seq_len)
loss = self.compute_loss(logits, batch["target_ids"], mask, reduce=False)
scores = -loss.sum(-1) / (mask.sum(-1) + 1e-6) # already masked in compute_loss()
return scores
def eval_step(
self,
batch: dict[str, torch.Tensor],
batch_idx: int,
dataloader_idx=0,
compute_loss=True,
) -> dict[str, Any]:
if isinstance(self.dataset, T0MultiTaskDataModule):
preds = self(batch)["logits"]
split = self.dataset.dev_splits[dataloader_idx]
for metric in self.metrics[split].values():
metric(*metric.detach_tensors(preds, batch["target_ids"], batch["target_mask"]))
return {}
else:
return super().eval_step(
batch, batch_idx, dataloader_idx=dataloader_idx, compute_loss=False
)
def on_save_checkpoint(self, checkpoint: dict[str, Any]):
"""
PyTorch's native optimizer state checkpoint logic is very fragile, so we also do it on our
own. See https://github.com/pytorch/pytorch/issues/1489
Also, when prompt-tuning, only stores prompt embedding in the checkpoint.
"""
optimizer_states = self.optimizers(use_pl_optimizer=False).state
if not self.train_full_model:
weight_keys = (
["transformer.model.shared.new_embed.weight"]
if not self.deep
else [
k
for k in checkpoint["state_dict"].keys()
if k.startswith("transformer.model.encoder.prefix_")
or k.startswith("transformer.model.decoder.prefix_")
]
)
checkpoint["state_dict"] = {k: checkpoint["state_dict"][k] for k in weight_keys}
name_to_param = {n: p for n, p in self.named_parameters()}
states = {k: optimizer_states[name_to_param[k]] for k in weight_keys}
else:
param_to_name = {p: n for n, p in self.named_parameters()}
states = {param_to_name[p]: states for p, states in optimizer_states.items()}
checkpoint["custom_optimizer_states"] = states
def on_load_checkpoint(self, checkpoint: dict[str, Any]) -> None:
if any(k.startswith("model.") for k in checkpoint["state_dict"].keys()):
# Unwrap the meta-learning model
new_state_dict = {}
for k, v in checkpoint["state_dict"].items():
assert k.startswith("model.")
new_state_dict[k[len("model.") :]] = v
checkpoint["state_dict"] = new_state_dict
# TODO: optimizer states
return super().on_load_checkpoint(checkpoint)
def meta_learning_copy(self):
new = PrefixTransformer(
self.config,
self.dataset,
self.transformer_name,
optimizer=self._optimizer,
epochs=self.epochs,
weight_decay=self.optimizer_kwargs["weight_decay"],
accumulate_grad_batches=self.optimizer_kwargs["accumulate_grad_batches"],
warmup_steps=self.optimizer_kwargs["warmup_steps"],
train_full_model=self.train_full_model,
deep=self.deep,
)
new.to(self.device)
new.load_state_dict(self.state_dict())
return new
@classmethod
def load_from_checkpoint(
cls,
checkpoint_path: Union[str, IO],
map_location: Optional[Union[Dict[str, str], str, torch.device, int, Callable]] = None,
hparams_file: Optional[str] = None,
strict: bool = True,
optimizer: Optional[Lazy[Optimizer]] = None,
**kwargs,
):
# We need to tell tango the type of optimizer, or otherwise it will only give us a Params
# object
return super().load_from_checkpoint(
checkpoint_path,
map_location=map_location,
hparams_file=hparams_file,
strict=strict,
optimizer=optimizer,
**kwargs,
)
| better-promptability-main | better_promptability/models/prefix_transformer.py |
better-promptability-main | better_promptability/common/__init__.py |
|
from contextlib import contextmanager
from copy import deepcopy
import logging
import os
import shutil
import tempfile
from pathlib import Path
from typing import List, Dict, Any, Optional, cast, Union
from tango.common.registrable import Registrable
from tango.common.util import PathOrStr
class BetterPromptabilityTestCase:
"""
A custom testing class that
* disables some of the more verbose logging,
* creates and destroys a temp directory as a test fixture, and
* restores the internal state of the `Registrable` class at the end of each test method.
"""
PROJECT_ROOT = (Path(__file__).parent / ".." / "..").resolve()
"""
Root of the git repository.
"""
MODULE_ROOT = PROJECT_ROOT / "better_promptability"
"""
Root of the tango module.
"""
TESTS_ROOT = PROJECT_ROOT / "tests"
"""
Root of the tests directory.
"""
FIXTURES_ROOT = PROJECT_ROOT / "test_fixtures"
"""
Root of the test fixtures directory.
"""
def setup_method(self):
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
)
# Disabling some of the more verbose logging statements that typically aren't very helpful
# in tests.
logging.getLogger("urllib3.connectionpool").disabled = True
# Create a temporary scratch directory.
self.TEST_DIR = Path(tempfile.mkdtemp(prefix="better_promptability_tests"))
os.makedirs(self.TEST_DIR, exist_ok=True)
@classmethod
def setup_class(cls):
# During teardown we'll restore the state of `Registrable`'s internal registry
# to make sure any registered mock test classes are removed so they don't conflict
# with other tests.
cls._original_registry = deepcopy(Registrable._registry)
def teardown_method(self):
shutil.rmtree(self.TEST_DIR)
@classmethod
def teardown_class(cls):
Registrable._registry = cls._original_registry
def run(
self,
config: Union[PathOrStr, Dict[str, Any]],
overrides: Optional[Union[Dict[str, Any], str]] = None,
include_package: Optional[List[str]] = None,
) -> Path:
from .params import Params
from tango.__main__ import _run, TangoGlobalSettings
if isinstance(config, dict):
params = Params(config)
config = self.TEST_DIR / "config.json"
params.to_file(cast(Path, config))
if isinstance(overrides, dict):
import json
overrides = json.dumps(overrides)
run_dir = self.TEST_DIR / "run"
_run(
TangoGlobalSettings(),
str(config),
directory=str(run_dir),
overrides=overrides,
include_package=include_package,
)
return run_dir
@contextmanager
def run_experiment(
config: Union[PathOrStr, Dict[str, Any]], overrides: Optional[Union[Dict[str, Any], str]] = None
):
"""
A context manager to make testing experiments easier. On ``__enter__`` it runs
the experiment and returns the path to the cache directory, a temporary directory that will be
cleaned up on ``__exit__``.
"""
test_case = BetterPromptabilityTestCase()
try:
test_case.setup_method()
yield test_case.run(config, overrides=overrides)
finally:
test_case.teardown_method()
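# A minimal usage sketch (the config path and step name here are hypothetical):
#
#     def test_my_experiment():
#         with run_experiment(BetterPromptabilityTestCase.FIXTURES_ROOT / "my_config.jsonnet") as run_dir:
#             # The run directory holds one subdirectory per step in the experiment config.
#             assert (run_dir / "my_step").exists()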
| better-promptability-main | better_promptability/common/testing.py |
from .process_dataset import ProcessDataset
| better-promptability-main | better_promptability/steps/__init__.py |
import logging
import os
from typing import Dict
from datasets import Dataset, DatasetDict
from tango.step import Step
from allennlp.common import cached_transformers
logger = logging.getLogger(__name__)
@Step.register("process_story_cloze")
class ProcessStoryCloze(Step):
DETERMINISTIC: bool = True
CACHEABLE = False # use datasets caching.
def run(
self,
old_data_path: str,
new_data_path: str,
process_if_exists: bool = False,
tokenizer_model: str = "google/t5-small-lm-adapt",
) -> DatasetDict: # type: ignore[override]
if not process_if_exists and os.path.exists(new_data_path):
logger.info(
f"The processed dataset already exists at {new_data_path}. "
"Set `process_if_exists` to `True` if you want to process again. "
"Returning existing dataset."
)
return DatasetDict.load_from_disk(new_data_path)
tokenizer = cached_transformers.get_tokenizer(tokenizer_model)
dataset_dict = DatasetDict.load_from_disk(old_data_path)
new_splits = {}
for split_name in dataset_dict:
split = dataset_dict[split_name]
new_instances: Dict = {
"inputs": [],
"inputs_pretokenized": [],
"targets": [],
"targets_pretokenized": [],
"is_correct": [],
}
for instance in split:
actual_targets_pretokenized = instance["targets_pretokenized"]
is_correct = [
choice.strip() == actual_targets_pretokenized.strip()
for choice in (instance["answer_choices"])
]
targets = [
tokenizer(choice, add_special_tokens=False)["input_ids"]
for choice in instance["answer_choices"]
]
targets_pretokenized = instance["answer_choices"]
new_instances["inputs"].append(instance["inputs"])
new_instances["inputs_pretokenized"].append(instance["inputs_pretokenized"])
new_instances["targets"].append(targets)
new_instances["targets_pretokenized"].append(targets_pretokenized)
new_instances["is_correct"].append(is_correct)
# Story cloze doesn't have a training set, so we use validation for training and test
# for validation. We in general don't use test sets.
if split_name == "validation":
split_name = "train"
if split_name == "test":
split_name = "validation"
new_splits[split_name] = Dataset.from_dict(new_instances)
new_dataset_dict: DatasetDict = DatasetDict(new_splits)
logger.info(f"Saving processed dataset at {new_data_path}.")
new_dataset_dict.save_to_disk(new_data_path)
return new_dataset_dict
| better-promptability-main | better_promptability/steps/process_story_cloze.py |
import logging
import os
from typing import Dict
from datasets import Dataset, DatasetDict
from tango.step import Step
logger = logging.getLogger(__name__)
@Step.register("process_dataset")
class ProcessDataset(Step):
DETERMINISTIC: bool = True
CACHEABLE = False # use datasets caching.
def run(
self, old_data_path: str, new_data_path: str, process_if_exists: bool = False
) -> DatasetDict: # type: ignore[override]
if not process_if_exists and os.path.exists(new_data_path):
logger.info(
f"The processed dataset already exists at {new_data_path}. "
"Set `process_if_exists` to `True` if you want to process again. "
"Returning existing dataset."
)
return DatasetDict.load_from_disk(new_data_path)
dataset_dict = DatasetDict.load_from_disk(old_data_path)
new_splits = {}
for split_name in dataset_dict:
split = dataset_dict[split_name]
new_instances: Dict = {
"inputs": [],
"inputs_pretokenized": [],
"targets": [],
"targets_pretokenized": [],
"is_correct": [],
}
instance: Dict = {
"inputs": None,
"inputs_pretokenized": None,
"targets": [],
"targets_pretokenized": [],
"is_correct": [],
}
# TODO: assert for presence of the right keys in the dataset.
for row in split:
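            # Each row holds a single answer choice; row["idx"] is (instance_idx, choice_idx),
            # so choice_idx == 0 marks the first choice of a new instance and we flush the
            # previous one.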
if row["idx"][1] == 0 and instance["inputs"] is not None:
new_instances["inputs"].append(instance["inputs"])
new_instances["inputs_pretokenized"].append(instance["inputs_pretokenized"])
new_instances["targets"].append(instance["targets"])
new_instances["targets_pretokenized"].append(instance["targets_pretokenized"])
new_instances["is_correct"].append(instance["is_correct"])
instance = {
"inputs": None,
"inputs_pretokenized": None,
"targets": [],
"targets_pretokenized": [],
"is_correct": [],
}
instance["inputs"] = row["inputs"]
instance["inputs_pretokenized"] = row["inputs_pretokenized"]
instance["targets"].append(row["targets"])
instance["targets_pretokenized"].append(row["targets_pretokenized"])
instance["is_correct"].append(row["is_correct"])
new_instances["inputs"].append(instance["inputs"])
new_instances["inputs_pretokenized"].append(instance["inputs_pretokenized"])
new_instances["targets"].append(instance["targets"])
new_instances["targets_pretokenized"].append(instance["targets_pretokenized"])
new_instances["is_correct"].append(instance["is_correct"])
new_splits[split_name] = Dataset.from_dict(new_instances)
new_dataset_dict: DatasetDict = DatasetDict(new_splits)
logger.info(f"Saving processed dataset at {new_data_path}.")
new_dataset_dict.save_to_disk(new_data_path)
return new_dataset_dict
| better-promptability-main | better_promptability/steps/process_dataset.py |
from collections import defaultdict
from typing import Any, Dict, List, Tuple, Set, Optional
import numpy as np
from tango import Format, JsonFormat, Step
from tango.common import Params
import torch
@Step.register("aggregate_results")
class AggregateResults(Step):
DETERMINISTIC = True
CACHEABLE = True
FORMAT: Format = JsonFormat()
VERSION = "002"
def run(self, results: Dict[str, Tuple[str, List[Dict[str, Any]]]]) -> Dict[str, Any]:
"""
        Aggregate the results of a bunch of `TrainStep`s. `results` is a mapping from `task_name`
        to the output of the corresponding `TrainStep`.
"""
t0_task_info = Params.from_file("configs/t0_task_info.jsonnet")["tasks"].as_dict(quiet=True)
def accuracy_for_task(task_name: str) -> float:
acc = results[task_name][1][-1]["best_categorical_accuracy"]
if isinstance(acc, (float, int)):
return float(acc)
elif isinstance(acc, torch.Tensor):
return acc.item()
else:
raise TypeError(acc)
def stats_for_tasks(tasks: Set[str]) -> Dict[str, Optional[float]]:
accuracies = [accuracy_for_task(task_name) for task_name in tasks]
return {
"mean": np.mean(accuracies),
"std": None if len(accuracies) <= 1 else np.std(accuracies),
}
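        # For example, stats_for_tasks({"task_a", "task_b"}) with accuracies 0.6 and 0.8
        # returns {"mean": 0.7, "std": 0.1}; a single-task set gets std None.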
dataset_to_tasks: Dict[str, Set[str]] = defaultdict(set)
dataset_to_subset_to_tasks: Dict[str, Dict[str, Set[str]]] = defaultdict(
lambda: defaultdict(set)
)
for task_name in results:
dataset_name = t0_task_info[task_name]["dataset_name"]
subset_name = t0_task_info[task_name]["subset_name"]
dataset_to_tasks[dataset_name].add(task_name)
dataset_to_subset_to_tasks[dataset_name][subset_name].add(task_name)
# For direct copying into a spreadsheet
flattened_results = []
for dataset_name, subset_to_tasks in dataset_to_subset_to_tasks.items():
for subset_name in subset_to_tasks:
stats = stats_for_tasks(subset_to_tasks[subset_name])
flattened_results.extend([stats["mean"], stats["std"]])
return {
"categorical_accuracy_all": stats_for_tasks(set(results.keys())),
"categorical_accuracy_by_dataset": {
dataset_name: stats_for_tasks(tasks)
for dataset_name, tasks in dataset_to_tasks.items()
},
"categorical_accuracy_by_dataset_and_subset": {
dataset_name: {
subset_name: stats_for_tasks(subset_to_tasks[subset_name])
for subset_name in subset_to_tasks
}
for dataset_name, subset_to_tasks in dataset_to_subset_to_tasks.items()
},
"flattened": ",".join(
[str(n * 100) if n is not None else "0" for n in flattened_results]
),
}
| better-promptability-main | better_promptability/train/aggregate_results.py |
better-promptability-main | better_promptability/train/__init__.py |
|
import logging
import os
import sys
from pathlib import Path
from typing import Dict, List, Tuple, Optional
import dill
import pytorch_lightning as pl
import transformers
from pytorch_lightning.plugins import DDPShardedPlugin
from pytorch_lightning.utilities import rank_zero_only
from tango.common.lazy import Lazy
from tango.common.util import get_extra_imported_modules
from tango.integrations.pytorch_lightning import (
LightningCallback,
LightningModule,
LightningTrainer,
)
from tango.format import JsonFormat
from tango.integrations.torch import Optimizer
from tango.step import Step
from better_promptability.data.config import Config
from better_promptability.data.prompt_data_module import PromptDataModule
from better_promptability.data.t0_multitask_data_module import T0MultiTaskDataModule
from better_promptability.models.model import Model
Optimizer.register("transformers::adafactor")(transformers.optimization.Adafactor)
logger = logging.getLogger(__name__)
@LightningCallback.register("my_logger")
class LoggingCallback(LightningCallback):
def __init__(self):
self.best_epoch = None
self.best_dev_metric = None
self.best_dev_metrics = None
self.metrics_history = []
@rank_zero_only
def on_validation_end(self, trainer: LightningTrainer, pl_module: LightningModule):
logger.info("")
logger.info(f"***** Validation results at epoch {trainer.current_epoch} *****")
assert pl_module.dataset.metric_watch_mode in {"max", "min"}
self.metrics_history.append({})
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}".format(key, str(metrics[key])))
self.metrics_history[-1][key] = metrics[key]
if key == pl_module.dataset.metric_to_watch and not trainer.sanity_checking:
curr_metric = metrics[key]
if (
self.best_dev_metric is None
or (
pl_module.dataset.metric_watch_mode == "max"
and curr_metric > self.best_dev_metric
)
or (
pl_module.dataset.metric_watch_mode == "min"
and curr_metric < self.best_dev_metric
)
):
self.best_epoch = trainer.current_epoch
self.best_dev_metric = curr_metric
self.best_dev_metrics = {
k: v
for k, v in metrics.items()
if k not in {"log", "progress_bar", "loss", "val_loss", "lr", "epoch"}
}
if not trainer.sanity_checking:
logger.info(f"best_epoch = {self.best_epoch}")
self.metrics_history[-1]["best_epoch"] = self.best_epoch
for key, value in sorted(self.best_dev_metrics.items()):
logger.info(f"best_{key} = {value}")
self.metrics_history[-1][f"best_{key}"] = value
@LightningCallback.register("t0_multitask")
class T0MultiTaskCallback(LightningCallback):
"""
A Lightning callback for resampling the ``MixerDataset`` at the end of each epoch.
"""
def on_epoch_end(self, trainer: LightningTrainer, pl_module: LightningModule):
assert isinstance(pl_module.dataset, T0MultiTaskDataModule)
for dataset in pl_module.dataset.dataset_dict.values():
dataset.resample()
# Since both FairScale and DeepSpeed are insane and will restart your whole process to make workers, we have
# to be able to do this when train.py is called as a standalone script.
def _train_step(
work_dir: Path,
config: Config,
trainer: Lazy[LightningTrainer],
strategy: Optional[str],
model: Lazy[Model],
datamodule: Lazy[PromptDataModule],
) -> Tuple[str, List[Dict]]:
pl.seed_everything(config.seed)
datamodule = datamodule.construct(config=config)
datamodule.prepare_data()
datamodule.setup()
logger.info("Constructing trainer ...")
trainer: LightningTrainer = trainer.construct(
work_dir=work_dir,
gpus=config.gpus,
precision=config.precision,
strategy=strategy,
auto_select_gpus=config.auto_select_gpus,
# Need to reload the dataloaders each epoch when using the T0MultiTaskDataModule.
reload_dataloaders_every_n_epochs=1 if isinstance(datamodule, T0MultiTaskDataModule) else 0,
)
logger.info("Done constructing trainer ...")
# Make sure we're using the `T0MultiTaskCallback` if using the `T0MultiTaskDataModule`
if isinstance(datamodule, T0MultiTaskDataModule):
for callback in trainer.callbacks:
if isinstance(callback, T0MultiTaskCallback):
break
else:
raise RuntimeError("T0MultiTaskCallback required when using T0MultiTaskDataModule")
epochs = trainer.max_epochs
logger.info("Constructing model ...")
model = model.construct(
config=config,
dataset=datamodule,
epochs=epochs,
accumulate_grad_batches=trainer.accumulate_grad_batches,
)
logger.info("Done constructing model ...")
assert model.epochs == epochs
# Find the checkpoint callback and make sure it uses the right directory.
# Also find the logging callback.
    checkpoint_callback: Optional[pl.callbacks.model_checkpoint.ModelCheckpoint] = None
logging_callback: LoggingCallback
for callback in trainer.callbacks:
if isinstance(callback, pl.callbacks.model_checkpoint.ModelCheckpoint):
callback.dirpath = work_dir
checkpoint_callback = callback
if isinstance(callback, LoggingCallback):
logging_callback = callback
resume_from_checkpoint = None
if "last.ckpt" in os.listdir(work_dir):
resume_from_checkpoint = os.path.join(work_dir, "last.ckpt")
trainer.fit(model, datamodule=datamodule, ckpt_path=resume_from_checkpoint)
if not trainer.state.finished:
raise ValueError(f"Trainer did not succeed! Final trainer state was {trainer.state}.")
return (
checkpoint_callback.best_model_path if checkpoint_callback is not None else None,
logging_callback.metrics_history,
)
@Step.register("train_step")
class TrainStep(Step):
VERSION = "004"
DETERMINISTIC: bool = True
CACHEABLE = True
FORMAT = JsonFormat()
def run( # type: ignore[override]
self,
config: Config,
trainer: Lazy[LightningTrainer],
model: Lazy[Model],
datamodule: Lazy[PromptDataModule],
) -> Tuple[str, List[Dict]]:
if config.gpus == 1:
strategy = None
elif config.gpus > 1:
# strategy = "deepspeed_stage_3_offload"
# strategy = "deepspeed_stage_3"
# strategy = "deepspeed_stage_2"
# strategy = "ddp_sharded"
# We never change trainability of parameters, so this is unnecessary. And actually
# we rely on this flag being False for the current meta learning implementation.
strategy = DDPShardedPlugin(auto_refresh_trainable=False)
# strategy = "ddp"
else:
strategy = None
kwargs_file = self.work_dir / "train_kwargs.dill"
with kwargs_file.open("wb") as f:
dill.dump(
{
"work_dir": self.work_dir,
"extra_modules": get_extra_imported_modules(),
"config": config,
"trainer": trainer,
"strategy": strategy,
"model": model,
"datamodule": datamodule,
},
f,
)
results_file = self.work_dir / "train_results.dill"
import subprocess
subprocess.check_call(
[
sys.executable,
"-m",
"better_promptability.train.train_main",
str(kwargs_file),
str(results_file),
]
)
with open(results_file, "rb") as f:
results = dill.load(f)
return results
| better-promptability-main | better_promptability/train/train.py |
from typing import Dict, List, Optional, Tuple
import pytorch_lightning as pl
from tango.common.lazy import Lazy
from tango.integrations.pytorch_lightning import LightningTrainer
from tango.format import JsonFormat
from tango.step import Step
from ..data.config import Config
from ..data.prompt_data_module import PromptDataModule
from ..models.model import Model
@Step.register("eval_step")
class EvalStep(Step):
DETERMINISTIC: bool = True
CACHEABLE = True
FORMAT = JsonFormat()
def run( # type: ignore[override]
self,
config: Config,
trainer: Lazy[LightningTrainer],
model: Lazy[Model],
datamodule: Lazy[PromptDataModule],
) -> Tuple[Optional[str], List[Dict[str, float]]]:
pl.seed_everything(config.seed)
datamodule = datamodule.construct(config=config)
datamodule.prepare_data()
datamodule.setup()
trainer: LightningTrainer = trainer.construct(
work_dir=self.work_dir,
gpus=config.gpus,
accelerator="gpu" if config.gpus else "cpu",
auto_select_gpus=True,
)
model = model.construct(config=config, dataset=datamodule)
output = trainer.test(model, dataloaders=datamodule.val_dataloader())
# Make output the same format as TrainStep for results aggregation.
# Maybe it's cleaner to make the aggregation more flexible instead.
assert len(output) == 1
output = [{"best_" + k: v for k, v in output[0].items()}]
return None, output
| better-promptability-main | better_promptability/train/eval.py |
from __future__ import annotations
from typing import Union
from transformers.optimization import Adafactor as HFAdafactor
from tango.integrations.torch.optim import Optimizer
@Optimizer.register("adafactor")
class Adafactor(HFAdafactor):
"""See https://github.com/huggingface/transformers/issues/14830
Nevertheless, this is only here for backward compatibility, and I suspect technically
you can just use transformers::adafactor in your config.
"""
@staticmethod
def _get_options(param_group, param_shape, min_dim_size_to_factor=128):
factored, use_first_moment = HFAdafactor._get_options(param_group, param_shape)
if all(d < min_dim_size_to_factor for d in param_shape):
factored = False
return factored, use_first_moment
def resolve_optimizer_conf(
opt_conf: Union[list[Optimizer], tuple[list[Optimizer], list[dict]]]
) -> Optimizer:
"""
Get the optimizer from the lightning's configure_optimizers() output.
"""
if (
isinstance(opt_conf, (list, tuple))
and len(opt_conf) == 2
and isinstance(opt_conf[0][0], Optimizer)
):
# optimizers + schedulers
optimizers = opt_conf[0]
else:
optimizers = opt_conf
assert len(optimizers) == 1
return optimizers[0]
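# A minimal sketch of the intended use (the LightningModule instance is hypothetical):
#
#     opt_conf = module.configure_optimizers()  # a list, or an (optimizers, schedulers) tuple
#     optimizer = resolve_optimizer_conf(opt_conf)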
| better-promptability-main | better_promptability/train/optim.py |
import sys
import dill
from tango.common.logging import initialize_logging
from tango.common.util import import_extra_module
from better_promptability.train.train import _train_step
def main():
initialize_logging()
_, kwargs_file, results_file = sys.argv
with open(kwargs_file, "rb") as f:
training_kwargs = dill.load(f)
for module in training_kwargs["extra_modules"]:
import_extra_module(module)
results = _train_step(
training_kwargs["work_dir"],
training_kwargs["config"],
training_kwargs["trainer"],
training_kwargs["strategy"],
training_kwargs["model"],
datamodule=training_kwargs["datamodule"],
)
with open(results_file, "wb") as f:
dill.dump(results, f)
if __name__ == "__main__":
main()
| better-promptability-main | better_promptability/train/train_main.py |
better-promptability-main | better_promptability/modules/__init__.py |
|
import logging
import torch
from transformers import (
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelForCausalLM,
)
logger = logging.getLogger(__name__)
TASKS = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"causal-lm": AutoModelForCausalLM,
"summarization": AutoModelForSeq2SeqLM,
"translation": AutoModelForSeq2SeqLM,
"seq2seq-lm": AutoModelForSeq2SeqLM,
}
class Transformer(torch.nn.Module):
def __init__(
self,
transformer_model: str,
task: str,
trainable=True,
config_cls=AutoConfig,
model_cls=None,
**config_kwargs,
):
super().__init__()
config_args = dict(config_kwargs)
if task == "base": # TODO: this might break models that don't support this flag
config_args["add_pooling_layer"] = False
self.config = config_cls.from_pretrained(transformer_model, **config_args)
model_cls = model_cls if model_cls is not None else TASKS[task]
self.model = model_cls.from_pretrained(transformer_model, config=self.config)
if not trainable: # TODO: support this
assert task == "base", "No support for freezing the backbone for headed tasks yet"
self.trainable = trainable
def forward(self, *args, **kwargs):
if "attention_mask" in kwargs: # `transformers` doesn't take bool masks which is crazy
kwargs["attention_mask"] = kwargs["attention_mask"].float()
if "decoder_attention_mask" in kwargs:
kwargs["decoder_attention_mask"] = kwargs["decoder_attention_mask"].float()
# If grad was previous disabled (e.g., in eval), don't change it
with torch.set_grad_enabled(torch.is_grad_enabled() and self.trainable):
return self.model(*args, **kwargs)
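# A minimal usage sketch (model name and inputs are illustrative):
#
#     from transformers import T5Tokenizer
#
#     model = Transformer("google/t5-small-lm-adapt", task="seq2seq-lm")
#     tokenizer = T5Tokenizer.from_pretrained("google/t5-small-lm-adapt")
#     enc = tokenizer(["The dog walks in the park"], return_tensors="pt")
#     out = model(input_ids=enc.input_ids, attention_mask=enc.attention_mask, labels=enc.input_ids)
#     print(out.loss, out.logits.shape)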
| better-promptability-main | better_promptability/modules/transformer.py |
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class WithPrefixEmbedding(nn.Module):
"""
From
https://github.com/shmsw25/Channel-LM-Prompting/blob/cbbb92cc97039c73475ddf0db46896e9efeff3c1/model_util.py#L113
"""
def __init__(self, orig_embed, expected_vocab_size, n_prefix):
super().__init__()
self.expected_vocab_size = expected_vocab_size
orig_embed_len = orig_embed.weight.shape[0]
assert expected_vocab_size <= orig_embed_len
if expected_vocab_size < orig_embed_len:
logger.warning(
f"Embedding matrix will be resized from {orig_embed_len} to {expected_vocab_size}. "
"This is expected for at least T5, and maybe some other models too. "
"See https://github.com/huggingface/transformers/issues/4875#issuecomment-997299787"
)
self.embed = orig_embed
self.new_embed = nn.Embedding(n_prefix, self.embed.embedding_dim)
        # Following Lester et al. (2021), initialize the prefix from the embeddings of tokens
        # randomly sampled from the first 5000 vocabulary entries.
indices = np.random.permutation(range(5000))[:n_prefix]
init_weight = self.embed.state_dict()["weight"][indices]
self.new_embed._load_from_state_dict({"weight": init_weight}, "", None, True, [], [], "")
def forward(self, input):
return F.embedding(
input,
torch.cat([self.embed.weight[: self.expected_vocab_size], self.new_embed.weight], 0),
self.embed.padding_idx,
self.embed.max_norm,
self.embed.norm_type,
self.embed.scale_grad_by_freq,
self.embed.sparse,
)
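# A minimal sketch of installing the prefix embedding into T5 (sizes assume the
# google/t5-small-lm-adapt checkpoint: 32100 tokenizer entries, 32128 embedding rows):
#
#     from transformers import T5ForConditionalGeneration
#
#     model = T5ForConditionalGeneration.from_pretrained("google/t5-small-lm-adapt")
#     model.set_input_embeddings(
#         WithPrefixEmbedding(model.get_input_embeddings(), expected_vocab_size=32100, n_prefix=20)
#     )
#     # Ids in [32100, 32120) now map to the newly initialized, trainable prefix rows.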
| better-promptability-main | better_promptability/modules/with_prefix_embedding.py |
from __future__ import annotations
from typing import Optional
from tango.common import PathOrStr, Params
from .config import Config
from .t0_module import T0Module
class T0Mixture:
"""
This class is used to initialize a collection of T0Module.
"""
def __init__(
self,
mixture_name: str, # should be "d4_train", "d4_dev", or "green"
config: Config,
num_prefix: int,
transformer_model: PathOrStr,
t0_data_cache: PathOrStr,
subsample_indices_file: Optional[str] = None,
**data_module_kwargs,
):
assert mixture_name in {"d4_train", "d4_dev", "green"}
self.mixture_name = mixture_name
self.data_modules: dict[str, T0Module] = {}
for task_name in Params.from_file("configs/t0_mixtures.jsonnet")[mixture_name]:
self.data_modules[task_name] = T0Module(
config=config,
num_prefix=num_prefix,
transformer_model=transformer_model,
mixture_name=self.mixture_name,
task_name=task_name,
t0_data_cache=t0_data_cache,
subsample_indices_file=subsample_indices_file,
**data_module_kwargs,
)
assert len(self.data_modules) > 0
| better-promptability-main | better_promptability/data/t0_mixture.py |
from __future__ import annotations
import random
from typing import Any, Optional, Union
from datasets import Dataset as HFDataset
from torch.utils.data import Dataset
from tango.common import Tqdm
class MixerDataset(Dataset):
"""
This dataset mixes multiple other datasets into a single :class:`Dataset`.
    The `sampling_cap` argument sets an artificial size limit on each dataset, which controls
    the sampling probability of each. This is useful when you have a mix of small and large
    datasets. When using `sampling_cap`, you should call :meth:`resample()` after every epoch
    to randomize the examples that get picked from the undersampled datasets, i.e. the datasets
    that are bigger than `sampling_cap`.
"""
def __init__(
self,
datasets: list[HFDataset],
sampling_cap: Optional[int] = None,
seed: int = 3, # this is important during distributed training
no_resample: bool = False, # useful for validation
):
self._datasets: list[Union[Dataset, HFDataset]] = []
self._total_size: int = 0
self._no_resample = no_resample
for dataset in Tqdm.tqdm(datasets, desc="Mixing datasets"):
if sampling_cap is not None and len(dataset) > sampling_cap:
self._total_size += sampling_cap
self._datasets.append(_UndersampledDataset(dataset, sampling_cap, seed=seed))
else:
self._total_size += len(dataset)
self._datasets.append(dataset)
def __getitem__(self, key: int) -> Any: # type: ignore[override]
for dataset in self._datasets:
if key < len(dataset):
return dataset[key]
key -= len(dataset)
raise IndexError("index out of bounds")
def __len__(self) -> int:
return self._total_size
def get_all_example_lens(self) -> list[int]:
lens = []
for dataset in Tqdm.tqdm(self._datasets, desc="Getting lengths for sampler"):
if isinstance(dataset, HFDataset):
lens.extend(dataset["sort_key_len"])
elif isinstance(dataset, _UndersampledDataset):
lens.extend(dataset.get_active_example_lens())
else:
assert False
return lens
def resample(self):
if self._no_resample:
return
for dataset in self._datasets:
if isinstance(dataset, _UndersampledDataset):
dataset.resample()
class _UndersampledDataset(Dataset):
def __init__(
self,
dataset: HFDataset,
sampling_cap: int,
seed: int = 3,
):
assert sampling_cap < len(dataset)
self._dataset = dataset
self._sampling_cap = sampling_cap
self._indices = list(range(len(self._dataset)))
self._num_taken = sampling_cap
self._seed = seed
# It's important that we can shuffle deterministically in order to guarantee
# that different processes shuffle the data in exactly the same way during distributed
# data parallel training, so we always set the seed before shuffling in this class.
# However, we don't want to mess with the RNG state outside of this class, so
# we make sure to reset it right after we shuffle.
state = random.getstate()
random.seed(self._seed)
random.shuffle(self._indices)
random.setstate(state)
def __getitem__(self, i: int) -> Any: # type: ignore[override]
        if i >= self._sampling_cap:
raise IndexError("index out of bounds")
return self._dataset[self._indices[i]]
def __len__(self) -> int:
return self._sampling_cap
def get_active_example_lens(self) -> list[int]:
return self._dataset.select(self._indices[: self._sampling_cap])["sort_key_len"]
def resample(self):
self._seed += 1
state = random.getstate()
random.seed(self._seed)
if self._num_taken + self._sampling_cap <= len(self._dataset):
# Re-organize `self._indices` so that the latest used chunk is pulled off and put on the end.
self._indices = (
self._indices[self._sampling_cap :] + self._indices[: self._sampling_cap]
)
self._num_taken += self._sampling_cap
else:
# Re-shuffle `self._indices` in a way that ensures the last chunk we have got to is
# used next.
used = (
self._indices[: self._sampling_cap]
+ self._indices[self._sampling_cap + (len(self._dataset) - self._num_taken) :]
)
unused = self._indices[
self._sampling_cap : self._sampling_cap + (len(self._dataset) - self._num_taken)
]
# `used` will be sliced up and moved around before being added back into `self._indices`,
# so we shuffle it now to add randomness.
random.shuffle(used)
# `next_up` is the next chunk of `self._sampling_cap` which will include all
# of `unused` and however many examples from `used` that we need to reach
# `self._sampling_cap` instances.
next_up = unused + used[: self._sampling_cap - len(unused)]
random.shuffle(next_up)
# Put everything back together into `self._indices`.
self._indices = next_up + used[self._sampling_cap - len(unused) :]
# clean up to hopefully help GC
del used, unused, next_up
self._num_taken = self._sampling_cap
random.setstate(state)
def fast_forward(self, num_epochs):
# Technically we can manipulate self._seed, self._indices, and self._num_taken directly,
# but this is easier and I think not much slower
for _ in range(num_epochs):
self.resample()
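# A worked sketch of the resampling schedule: with len(dataset) == 10 and sampling_cap == 4,
# epoch 1 serves shuffled indices [0:4], resample() rotates so epoch 2 serves the old [4:8],
# and when fewer than 4 unseen indices remain, the leftovers are topped up with reshuffled
# already-seen indices, so every example is served once before the pool reshuffles.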
| better-promptability-main | better_promptability/data/mixer_dataset.py |
from typing import Optional, Union
from tango.common.aliases import PathOrStr
from tango.common.registrable import Registrable
class Config(Registrable):
def __init__(
self,
seed: int = 42,
gpus: int = 1,
precision: Union[int, str] = 32,
output_dir: Optional[PathOrStr] = None,
auto_select_gpus: bool = True,
):
self.seed = seed
self.precision = precision
self.gpus = gpus # TODO: do stuff with visible devices.
self.output_dir = output_dir
self.auto_select_gpus = auto_select_gpus
Config.register("default")(Config)
| better-promptability-main | better_promptability/data/config.py |
from __future__ import annotations
from typing import Any, Mapping
from urllib.error import HTTPError
from tango.common.aliases import PathOrStr
from transformers import T5Tokenizer, PreTrainedTokenizerBase
from .data_utils import PAD_TYPE
from .data_module import DataModule
from .config import Config
class PromptDataModule(DataModule):
def __init__(
self,
config: Config,
num_prefix: int,
transformer_model: PathOrStr,
deep: bool = False,
**kwargs,
):
self.num_prefix = num_prefix
self.transformer_model = transformer_model
self.deep = deep
if not self.deep:
self.task_tokens = ["<TASK{}>".format(str(i).zfill(2)) for i in range(self.num_prefix)]
super().__init__(config, **kwargs)
self.inputs_max_length = 768
self.targets_max_length = 192
@property
def hash_fields(self) -> list[Any]:
return super().hash_fields + [
self.num_prefix,
self.deep,
self.inputs_max_length,
self.targets_max_length,
]
def setup_tokenizer(self, retry=10) -> PreTrainedTokenizerBase:
while True:
try:
tokenizer = T5Tokenizer.from_pretrained(self.transformer_model)
break
except HTTPError as e:
if retry == 0:
raise e
retry -= 1
if not self.deep:
tokenizer.add_tokens(self.task_tokens)
task_token_ids = tokenizer(
" ".join(self.task_tokens), return_tensors="pt", add_special_tokens=False
)["input_ids"]
assert task_token_ids.shape[-1] == self.num_prefix
self.task_token_ids = task_token_ids.squeeze(0).tolist()
return tokenizer
def tokenize(self, example: dict[str, Any], split: str):
        raise NotImplementedError("This is an abstract method. Did you forget to implement it?")
def pad_token_map(self, split: str) -> Mapping[str, PAD_TYPE]: # type: ignore
"""
Specifies the padding for each key. Only keys including in this map will be
included in the batch.
"""
pad_token_map_ = {
"input_ids": 0,
"input_mask": False,
"target_ids": 0,
"target_mask": False,
}
return pad_token_map_
| better-promptability-main | better_promptability/data/prompt_data_module.py |
from __future__ import annotations
import logging
import os
from abc import abstractmethod
from collections.abc import ItemsView
from typing import Any, Mapping, Optional, Union
from allennlp.training.metrics import Metric
import datasets
from datasets import Dataset as HFDataset, DatasetDict as HFDatasetDict
from tango.common import DatasetDict as TangoDatasetDict
from tango.common.aliases import PathOrStr
from tango.integrations.pytorch_lightning.data import LightningDataModule
from torch.utils.data import DataLoader
from transformers import PreTrainedTokenizerBase
from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler
from .config import Config
from .data_utils import PAD_TYPE, collate_fn as default_collate_fn, md5
from .mixer_dataset import MixerDataset
# Sometimes we want to change the implementation of methods, etc., which the datasets cache
# ignores. We maintain our own cache, so it is not very useful to us anyway.
datasets.set_caching_enabled(False)
logger = logging.getLogger(__name__)
DatasetDictType = Union[TangoDatasetDict, HFDatasetDict]
class DataModule(LightningDataModule):
"""
Abstract class representing a lightning data module using HF datasets, relevant properties,
and a tokenizer.
"""
def __init__(
self,
config: Config,
data_dir: Optional[PathOrStr] = None,
max_length: Optional[int] = None,
preprocess_and_save: bool = True,
batch_size: int = 32,
eval_batch_size: int = 32,
num_workers: int = 1,
):
super().__init__()
self.config = config
self.data_dir = data_dir or "/tmp/better-promptability/data-dir"
self.max_length = max_length
self.preprocess_and_save = preprocess_and_save
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.num_workers = num_workers
self._tokenizer: Optional[PreTrainedTokenizerBase] = None
def setup(self, stage: Optional[str] = None):
if self.preprocess_and_save:
if os.path.exists(self.cache_path):
self.dataset_dict = HFDatasetDict.load_from_disk(self.cache_path)
return
self.dataset_dict = self.load()
if self.preprocess_and_save:
self.dataset_dict = self.preprocess(self.dataset_dict)
logger.info(f"Saving dataset cache at {self.cache_path}")
self.dataset_dict.save_to_disk(self.cache_path)
def _to_params(self):
return {}
def __getitem__(self, key: str) -> HFDataset:
return self.dataset_dict[key]
@property
def hash_fields(self) -> list[Any]:
"""For cache purpose"""
return [self.config.seed, self.tokenizer.__repr__()]
@property
def cache_path(self) -> str:
hash_fields = "".join([str(f) for f in self.hash_fields])
return os.path.join(
self.data_dir,
f"{self.__class__.__name__}_{md5(hash_fields)}.datacache",
)
@property
def train_split(self) -> str:
return "train"
@property
def dev_splits(self) -> list[str]:
return ["dev"]
@property
def test_splits(self) -> list[str]:
return ["test"] # we don't actually use this
@property
    @abstractmethod
def sort_key(self) -> str:
raise NotImplementedError("This is an abstract property. Did you forget to implement it?")
@property
    @abstractmethod
def metric_names(self) -> list[str]:
raise NotImplementedError("This is an abstract property. Did you forget to implement it?")
def instantiate_metric(self, metric_name: str, split: str) -> Metric:
return Metric.by_name(metric_name)()
@property
def metric_to_watch(self) -> str:
if len(self.metric_names) == 1:
return self.metric_names[0]
else:
raise NotImplementedError(
"This is an abstract property. Did you forget to implement it?"
)
@property
    @abstractmethod
def metric_watch_mode(self) -> str:
raise NotImplementedError("This is an abstract property. Did you forget to implement it?")
@abstractmethod
def load(self) -> DatasetDictType:
raise NotImplementedError("This is an abstract method. Did you forget to implement it?")
@abstractmethod
def tokenize(self, examples: dict[str, list], split: str) -> dict[str, list]:
raise NotImplementedError("This is an abstract method. Did you forget to implement it?")
def preprocess(self, dataset_dict: DatasetDictType) -> DatasetDictType:
logger.info("Begin preprocessing")
assert isinstance(dataset_dict, HFDatasetDict)
dataset_dict = HFDatasetDict( # reimplementing DatasetDict.map to provide `split`
{
split: dataset.map(
lambda examples: self.tokenize(examples, split),
batched=False, # to make tokenization/transformation easier
num_proc=4,
)
for split, dataset in dataset_dict.items()
}
)
logger.info("End preprocessing")
# Rename validation -> dev
if "validation" in dataset_dict and "dev" not in dataset_dict:
dataset_dict["dev"] = dataset_dict["validation"]
del dataset_dict["validation"]
return dataset_dict
@property
def tokenizer(self) -> PreTrainedTokenizerBase:
if self._tokenizer is None:
tokenizer = self.setup_tokenizer()
self._tokenizer = tokenizer
return tokenizer
else:
return self._tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: PreTrainedTokenizerBase):
self._tokenizer = tokenizer
@abstractmethod
def setup_tokenizer(self) -> PreTrainedTokenizerBase:
raise NotImplementedError("This is an abstract method. Did you forget to implement it?")
def items(self) -> ItemsView:
return self.dataset_dict.items()
def dataloader(
self, split: str, batch_size: int, collate_fn=default_collate_fn
) -> DataLoader:
dataset_split = self.dataset_dict[split]
# LengthGroupedSampler sorts from longest to shortest; we want the reverse
if isinstance(dataset_split, MixerDataset):
# The naive processing is slow and takes too much memory
lens = [-l for l in dataset_split.get_all_example_lens()] # noqa: E741
else:
lens = [-len(ids) for ids in dataset_split[self.sort_key]]
if self.config.gpus is None or self.config.gpus <= 1:
sampler = LengthGroupedSampler(batch_size, lengths=lens)
else:
sampler = DistributedLengthGroupedSampler(batch_size, lengths=lens)
pad_token_map = self.pad_token_map(split)
assert all(pad is not None for pad in pad_token_map.values())
dataloader = DataLoader(
dataset_split,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=self.num_workers,
collate_fn=lambda batch: collate_fn(batch, pad_token_map, self.tokenizer.padding_side),
pin_memory=True,
)
return dataloader
@abstractmethod
def pad_token_map(self, split: str) -> Mapping[str, PAD_TYPE]:
"""
        Specifies the padding for each key. Only keys included in this map will be
included in the batch.
"""
raise NotImplementedError("This is an abstract method. Did you forget to implement it?")
def train_dataloader(self) -> DataLoader:
return self.dataloader(self.train_split, self.batch_size)
def val_dataloader(self, shuffle: bool = False):
return [self.dataloader(split, self.eval_batch_size) for split in self.dev_splits]
def test_dataloader(self, shuffle: bool = False):
return [self.dataloader(split, self.eval_batch_size) for split in self.test_splits]
| better-promptability-main | better_promptability/data/data_module.py |
from __future__ import annotations
import random
from typing import Optional, Mapping
from datasets import Dataset as HFDataset
from tango.common import PathOrStr, Tqdm
import torch
import torch.distributed as dist
from torch.utils.data.dataloader import DataLoader
from transformers.trainer_pt_utils import LengthGroupedSampler
from .config import Config
from .data_utils import collate_fn as default_collate_fn, PAD_TYPE
from .mixer_dataloader import MixerDataLoader
from .mixer_dataset import _UndersampledDataset
from .prompt_data_module import PromptDataModule
from .t0_multitask_data_module import T0MultiTaskDataModule
def split_batch(meta_batch: list, support_batch_size: int) -> list:
# Because each batch is internally sorted by length, the naive split will cause a distributional
# difference.
processed_meta_batch = []
for batch in meta_batch:
batch_size = len(list(batch.values())[0])
assert all(len(v) == batch_size for v in batch.values())
support_indices = random.sample(range(batch_size), support_batch_size)
support_indices_set = set(support_indices)
query_indices = [i for i in range(batch_size) if i not in support_indices_set]
support_batch = {k: v[support_indices] for k, v in batch.items()}
query_batch = {k: v[query_indices] for k, v in batch.items()}
processed_meta_batch.append((support_batch, query_batch))
return processed_meta_batch
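# A minimal sketch of the split (shapes are illustrative):
#
#     batch = {"input_ids": torch.arange(8).view(4, 2)}  # batch size 4
#     [(support, query)] = split_batch([batch], support_batch_size=1)
#     assert len(support["input_ids"]) == 1 and len(query["input_ids"]) == 3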
@PromptDataModule.register("t0_meta_learning")
class T0MetaLearningDataModule(T0MultiTaskDataModule):
def __init__(
self,
meta_batch_size: int,
support_batch_size: int,
mixture_name: str, # should be 'd4_train', 'd4_dev', or 'green'.
config: Config,
num_prefix: int,
transformer_model: PathOrStr,
sampling_cap: Optional[int] = 500000,
**kwargs
):
self.meta_batch_size = meta_batch_size
self._meta_batch_size_per_device = self.meta_batch_size // (
dist.get_world_size() if dist.is_initialized() else 1
)
self.support_batch_size = support_batch_size
super().__init__(
mixture_name, config, num_prefix, transformer_model, sampling_cap=sampling_cap, **kwargs
)
def collate_fn(
self, batch: list[dict[str, list]], pad_token_map: Mapping[str, PAD_TYPE], padding_side: str
) -> list[tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]]:
batch = [
default_collate_fn(batch[i : i + self.real_batch_size], pad_token_map, padding_side)
for i in range(0, len(batch), self.real_batch_size)
]
if len(batch[-1]["input_ids"]) < self.real_batch_size:
batch = batch[:-1]
return split_batch(batch, self.support_batch_size)
def dataloader(
self, split: str, batch_size: int, collate_fn=default_collate_fn
) -> DataLoader:
if split != "train":
return super().dataloader(split, batch_size)
dataset_split = self.dataset_dict[split]
pad_token_map = self.pad_token_map(split)
assert all(pad is not None for pad in pad_token_map.values())
dataloaders = []
for dataset in Tqdm.tqdm(dataset_split._datasets, desc="Creating dataloaders"):
# zhaofeng: I don't particularly like this design because of the redundancy with
            # DataModule. But this is necessary at least to accommodate _UndersampledDataset at the
# moment, unless we can somehow turn it into a DataModule too.
if isinstance(dataset, HFDataset):
lens = dataset["sort_key_len"]
elif isinstance(dataset, _UndersampledDataset):
lens = dataset.get_active_example_lens()
else:
assert False
# LengthGroupedSampler sorts from longest to shortest; we want the reverse
lens = [-l for l in lens] # noqa: E741
            # It's important we don't use the distributed sampler here, since distributed logic
            # is handled in MixerDataLoader.
sampler = LengthGroupedSampler(batch_size, lengths=lens)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
sampler=sampler,
num_workers=0, # avoid too many open files error
collate_fn=lambda batch: collate_fn(
batch, pad_token_map, self.tokenizer.padding_side
),
pin_memory=True,
drop_last=True, # division into support/query is unclear with incomplete batches
)
dataloaders.append(dataloader)
return MixerDataLoader(
dataloaders,
self.meta_batch_size,
batch_postprocessor=lambda b: split_batch(b, self.support_batch_size),
)
| better-promptability-main | better_promptability/data/t0_meta_learning_data_module.py |
from .t0_mixture import T0Mixture
from .t0_module import T0Module
| better-promptability-main | better_promptability/data/__init__.py |
from __future__ import annotations
from typing import Any, List, Mapping, Optional
from pathlib import Path
import pickle
from allennlp.training.metrics import Metric
import numpy as np
from tango.common import Params, PathOrStr
import datasets
from .data_module import DatasetDictType
from .data_utils import md5, PAD_TYPE
from .prompt_data_module import PromptDataModule
from .config import Config
def read_task_info() -> dict[str, tuple[str, Optional[str], str]]:
task_name_to_info: dict[str, tuple[str, Optional[str], str]] = {}
for task_name, info in (
Params.from_file("configs/t0_task_info.jsonnet").as_dict(quiet=True)["tasks"].items()
):
task_name_to_info[task_name] = (
info["dataset_name"],
info["subset_name"],
info["template_name"],
)
return task_name_to_info
@PromptDataModule.register("t0", exist_ok=True)
class T0Module(PromptDataModule):
"""
Represents a single dataset AND template, but all the splits.
"""
def __init__(
self,
config: Config,
num_prefix: int,
transformer_model: PathOrStr,
mixture_name: str,
task_name: str,
t0_data_cache: PathOrStr,
subsample_indices_file: Optional[str] = None,
**kwargs,
):
super().__init__(config, num_prefix, transformer_model, **kwargs)
self.mixture_name = mixture_name
self.task_name = task_name
self.dataset_name, self.subset_name, self.template_name = read_task_info()[self.task_name]
self.t0_data_cache = Path(t0_data_cache)
self.subsample_indices = None
if subsample_indices_file is not None:
self.subsample_indices = pickle.load(open(subsample_indices_file, "rb"))[task_name]
@property
def hash_fields(self) -> list[Any]:
return super().hash_fields + [self.task_name]
def setup(self, stage: Optional[str] = None):
super().setup(stage)
if self.subsample_indices is not None:
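            # Select the precomputed subsample, then verify the selection against the stored
            # md5 checksum to catch any drift in the underlying dataset.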
indices, checksum = self.subsample_indices
dataset = self.dataset_dict[self.train_split].select(indices)
assert md5("".join(str(ex["inputs"] + ex["targets"]) for ex in dataset)) == checksum
self.dataset_dict[self.train_split] = dataset
@property
def dev_splits(self) -> list[str]:
# d4_dev and green datasets should have dev splits, d4_train may not.
if (
self.mixture_name in {"d4_dev", "green"}
or "dev" in self.dataset_dict
):
return ["dev"]
return []
@property
def test_splits(self) -> list[str]:
# We don't need the test sets. The test set labels of some datasets are hidden
# (e.g., superglue), and T0 only evaluated on the dev sets.
return []
@property
def metric_names(self) -> list[str]:
# For all the green (i.e., d4_score_eval) datasets, all tasks have accuracy as the metric.
return ["categorical_accuracy"]
@property
def metric_watch_mode(self) -> str:
return "max"
@property
def sort_key(self) -> str:
return "inputs"
def load(self) -> DatasetDictType:
data_path = self.t0_data_cache / self.task_name
assert data_path.is_dir()
dataset_dict = datasets.load_from_disk(data_path)
# See comment in test_splits(), above
dataset_dict.pop("test", None)
return dataset_dict
def tokenize(self, example: dict[str, Any], split: str) -> dict[str, Any]:
inputs = example["inputs"][: self.inputs_max_length]
# Make sure there are no other EOS in `inputs` and `targets`.
# The EOS token is really the only special token we are concerned about with T5.
# T5 has no BOS token. There might be UNK tokens in the inputs though, but that's okay.
assert self.tokenizer.eos_token_id not in inputs
single_target: bool = False
is_correct: Optional[List[bool]] = None
targets = example["targets"]
if self.mixture_name == "d4_train":
single_target = True
elif self.mixture_name == "d4_dev" and split == self.train_split:
single_target = True
        # We want to evaluate d4_dev datasets the same way as the green ones.
# Some d4_dev datasets do not have answer_choices at all
# (eg. "web_questions_get_the_answer" simply wants a knowledge-based answer).
# We ignore these datasets.
elif self.mixture_name == "d4_dev" and split != self.train_split:
single_target = False
# The format in d4_dev is the same as train (there is no is_correct).
# To get multiple targets, we need to use "answer_choices", and tokenize them.
is_correct = [
choice.strip() == example["targets_pretokenized"].strip()
for choice in example["answer_choices"]
]
targets = [self.tokenizer(choice)["input_ids"] for choice in example["answer_choices"]]
elif self.mixture_name == "green" and split == self.train_split:
single_target = True
# Actually getting the single target.
correct_idx = np.argmax(example["is_correct"])
targets = targets[correct_idx]
else:
single_target = False
is_correct = example["is_correct"]
if single_target:
targets = targets[:-1][ # exclude EOS in example['targets'] (we add later)
: self.targets_max_length
]
assert self.tokenizer.eos_token_id not in targets
input_ids, target_ids, input_mask, target_mask = assemble_prompt(
inputs,
targets,
self.tokenizer.eos_token_id,
self.task_token_ids if not self.deep else [],
)
else:
input_ids = []
input_mask = []
target_mask = []
target_ids = []
for target in targets:
target = target[:-1][ # exclude EOS in example['targets'] (we add later)
: self.targets_max_length
]
assert self.tokenizer.eos_token_id not in target
_input_ids, _target_ids, _input_mask, _target_mask = assemble_prompt(
inputs,
target,
self.tokenizer.eos_token_id,
self.task_token_ids if not self.deep else [],
)
input_ids.append(_input_ids)
input_mask.append(_input_mask)
target_ids.append(_target_ids)
target_mask.append(_target_mask)
return_dict = {
"input_ids": input_ids,
"input_mask": input_mask,
"target_ids": target_ids,
"target_mask": target_mask,
"sort_key_len": len(example[self.sort_key]),
}
if not single_target:
assert is_correct is not None and sum(is_correct) == 1
return_dict["is_correct"] = is_correct
return_dict["is_correct_mask"] = [True] * len(is_correct)
return return_dict
def pad_token_map(self, split: str) -> Mapping[str, PAD_TYPE]: # type: ignore
"""
        Specifies the padding for each key. Only keys included in this map will be
included in the batch.
"""
pad_token_map_ = {
"input_ids": 0,
"input_mask": False,
"target_ids": 0,
"target_mask": False,
}
if (
self.mixture_name in {"d4_dev", "green"}
and split != self.train_split
):
pad_token_map_["is_correct"] = False
pad_token_map_["is_correct_mask"] = False
return pad_token_map_
def assemble_prompt(inputs, targets, eos_token_id, task_token_ids):
input_ids = task_token_ids + inputs + [eos_token_id]
target_ids = targets + [eos_token_id]
input_mask = [True] * len(input_ids)
target_mask = [True] * len(target_ids)
return input_ids, target_ids, input_mask, target_mask
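# A worked sketch of assemble_prompt (token ids are illustrative):
#
#     input_ids, target_ids, input_mask, target_mask = assemble_prompt(
#         inputs=[37, 10681], targets=[5295], eos_token_id=1, task_token_ids=[32100]
#     )
#     # input_ids == [32100, 37, 10681, 1], target_ids == [5295, 1]
#     # input_mask == [True] * 4, target_mask == [True] * 2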
| better-promptability-main | better_promptability/data/t0_module.py |
from __future__ import annotations
from typing import Optional, Mapping, Any
from tango.common import Tqdm, DatasetDict, PathOrStr
from .data_utils import PAD_TYPE
from .config import Config
from .mixer_dataset import MixerDataset, _UndersampledDataset
from .prompt_data_module import PromptDataModule
from .t0_mixture import T0Mixture
@PromptDataModule.register("t0_multitask")
class T0MultiTaskDataModule(PromptDataModule):
def __init__(
self,
mixture_name: str, # should be 'd4_train', 'd4_dev', or 'green'.
config: Config,
num_prefix: int,
transformer_model: PathOrStr,
t0_data_cache: PathOrStr,
sampling_cap: Optional[int] = 500000,
dev_sampling_cap: Optional[int] = 400,
**kwargs,
):
super().__init__(config, num_prefix, transformer_model, preprocess_and_save=False, **kwargs)
self.mixture_name = mixture_name
self.t0_mixture = T0Mixture(
mixture_name,
config,
num_prefix,
transformer_model,
t0_data_cache=t0_data_cache,
**kwargs,
)
self.sampling_cap = sampling_cap
self.dev_sampling_cap = dev_sampling_cap
@property
def hash_fields(self) -> list[Any]:
return super().hash_fields + [
self.mixture_name,
self.sampling_cap,
self.dev_sampling_cap,
]
@property
def dev_splits(self) -> list[str]:
return ["dev"]
@property
def test_splits(self) -> list[str]:
# We don't need the test sets. The test set labels of some datasets are hidden
# (e.g., superglue), and T0 only evaluated on the dev sets.
return []
@property
def metric_names(self) -> list[str]:
return ["categorical_accuracy"]
@property
def metric_watch_mode(self) -> str:
return "max"
@property
def sort_key(self) -> str:
return "inputs"
def pad_token_map(self, split: str) -> Mapping[str, PAD_TYPE]: # type: ignore
pad_token_map_ = {
"input_ids": 0,
"input_mask": False,
"target_ids": 0,
"target_mask": False,
}
if (
self.mixture_name in {"d4_dev", "green"}
and split != self.train_split
):
pad_token_map_["is_correct"] = False
pad_token_map_["is_correct_mask"] = False
return pad_token_map_
def load(self) -> DatasetDict:
with Tqdm.tqdm(self.t0_mixture.data_modules.items(), "Loading T0 datasets") as dm_iter:
for name, data_module in dm_iter:
dm_iter.set_postfix({"module": name if len(name) < 30 else (name[:27] + "...")})
data_module.tokenizer = self.tokenizer
assert data_module.deep == self.deep
if not self.deep:
data_module.task_token_ids = self.task_token_ids
data_module.setup()
return DatasetDict(
splits={
"train": MixerDataset(
[dm[dm.train_split] for dm in self.t0_mixture.data_modules.values()],
sampling_cap=self.sampling_cap,
),
"dev": MixerDataset(
[
dm[dm.dev_splits[0]]
for dm in self.t0_mixture.data_modules.values()
if len(dm.dev_splits) > 0
],
sampling_cap=self.dev_sampling_cap,
no_resample=True,
),
}
)
def on_load_checkpoint(self, checkpoint: dict[str, Any]):
epochs_elapsed = checkpoint["epoch"] # verified that this is 1-based, so we're good
assert self.dataset_dict is not None # loaded already
for mixer_dataset in self.dataset_dict.values():
assert isinstance(mixer_dataset, MixerDataset)
for dataset in mixer_dataset._datasets:
if isinstance(dataset, _UndersampledDataset):
dataset.fast_forward(epochs_elapsed)
super().on_load_checkpoint(checkpoint)
| better-promptability-main | better_promptability/data/t0_multitask_data_module.py |
from __future__ import annotations
import hashlib
from typing import Iterable, Mapping, Union
import math
import numpy as np
import torch
from torch.utils.data._utils.collate import default_collate
PAD_TYPE = Union[int, float, bool]
def _find_max_shapes(
batch: list[dict[str, np.ndarray]], allow_keys: Iterable[str], pad_to_multiples_of_8: bool
) -> dict[str, np.ndarray]:
max_shapes = {}
for e in batch:
for k, v in e.items():
if k not in allow_keys:
continue
shape = np.array(v.shape)
if k not in max_shapes:
max_shapes[k] = shape
else:
try:
max_shapes[k] = np.maximum(max_shapes[k], shape)
except ValueError: # more informed error message
raise ValueError(f"Different shapes for {k}: {max_shapes[k]} vs. {shape}")
if pad_to_multiples_of_8:
for k, v in max_shapes.items():
max_shapes[k] = np.array([int(math.ceil(i / 8)) * 8 for i in v])
return max_shapes
def _pad_last_dim(sequence: list[list], padding_token: PAD_TYPE, padding_side: str):
"""
In-place pads the last dimension of a 2d list.
"""
assert padding_side in {"left", "right"}
max_len = max(len(e) for e in sequence)
for i, e in enumerate(sequence):
pad_len = max_len - len(e)
sequence[i] = (
([padding_token] * pad_len if padding_side == "left" else [])
+ e
+ ([padding_token] * pad_len if padding_side == "right" else [])
)
def _pad(
sequence: np.ndarray, padding_token: PAD_TYPE, padding_shape: np.ndarray, padding_side: str
) -> np.ndarray:
assert padding_side in {"left", "right"}
if sequence is None:
return None
padding = [(p, 0) if padding_side == "left" else (0, p) for p in padding_shape]
return np.pad(sequence, padding, constant_values=padding_token)
def _tensorize(sequence: np.ndarray, name: str) -> torch.Tensor:
dtype = torch.long
if "_mask" in name or "is_correct" in name: # TODO: there should be a smarter way to do this
dtype = torch.bool
return torch.tensor(sequence, dtype=dtype)
def collate_fn(
batch: list[dict[str, list]],
pad_token_map: Mapping[str, PAD_TYPE],
padding_side: str,
pad_to_multiples_of_8: bool = False,
) -> dict[str, torch.Tensor]:
"""
Input:
    pad_token_map: specifies the padding for each key. Only keys included in this map
will be included in the batch.
"""
# This is a bit ad-hoc to deal with 3d elements, but it works
for e in batch:
for k, v in e.items():
if k in pad_token_map and isinstance(v[0], list):
_pad_last_dim(v, pad_token_map[k], padding_side)
batch = [{k: np.array(v) for k, v in e.items() if k in pad_token_map} for e in batch]
max_shapes = _find_max_shapes(
batch, pad_token_map.keys(), pad_to_multiples_of_8=pad_to_multiples_of_8
)
for i, e in enumerate(batch):
batch[i] = {
k: _pad(e[k], pad_token, max_shapes[k] - np.array(e[k].shape), padding_side)
for k, pad_token in pad_token_map.items()
}
batch[i] = {k: _tensorize(v, k) for k, v in batch[i].items()}
return default_collate(batch)
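# A minimal usage sketch of collate_fn (keys and pad values are illustrative):
#
#     batch = [{"input_ids": [1, 2, 3], "input_mask": [True] * 3},
#              {"input_ids": [4], "input_mask": [True]}]
#     out = collate_fn(batch, {"input_ids": 0, "input_mask": False}, padding_side="right")
#     # out["input_ids"].shape == (2, 3); the second row is right-padded with zeros.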
def md5(s):
return hashlib.md5(s.encode("utf-8")).hexdigest()
| better-promptability-main | better_promptability/data/data_utils.py |
from __future__ import annotations
import math
import random
from typing import Callable
import torch.distributed as dist
from torch.utils.data.dataloader import DataLoader, _BaseDataLoaderIter
class MixerDataLoader(DataLoader):
"""
A dataloader that encapsulates multiple dataloaders. At each iteration, yields the next batch
from a random dataloader.
"""
def __init__(
self,
dataloaders: list[DataLoader],
meta_batch_size: int,
batch_postprocessor: Callable[[list], list] = lambda b: b,
):
self._dataloader_iters = [iter(dataloader) for dataloader in dataloaders]
self._meta_batch_size = self._meta_batch_size_per_device = meta_batch_size
self._batch_postprocessor = batch_postprocessor
if dist.is_initialized():
self._world_size = dist.get_world_size()
self._rank = dist.get_rank()
assert self._meta_batch_size % self._world_size == 0
self._meta_batch_size_per_device = self._meta_batch_size // self._world_size
num_batches = sum(len(dataloader) for dataloader in dataloaders)
if dist.is_initialized():
self._total_len = num_batches // meta_batch_size
if num_batches % meta_batch_size > self._rank:
                # Some GPUs get one more meta-batch, depending on how many batches are left
                # over for the final, incomplete meta-batch.
self._total_len += 1
else:
self._total_len = int(math.ceil(num_batches / meta_batch_size))
self._weights = [len(dataloader) for dataloader in dataloaders]
self._seed = 1
self.num_workers = 0 # TODO: multiprocessing
self.collate_fn = None
self.dataset = None
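    # Batches are drawn from a random dataloader with probability proportional to the number of
    # batches it has left; decrementing the weight makes this sampling without replacement.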
def sample_one_batch(self):
dataloader_idx = random.choices(range(len(self._dataloader_iters)), self._weights)[0]
self._weights[dataloader_idx] -= 1
assert all(w >= 0 for w in self._weights)
dataloader_iter = self._dataloader_iters[dataloader_idx]
return next(dataloader_iter)
def __iter__(self) -> _BaseDataLoaderIter:
while True:
batches = []
for _ in range(self._meta_batch_size_per_device):
if dist.is_initialized():
# For every GPU, we sample the same WORLD_SIZE samples (achieved by temporarily
# syncing the rng state), and give each GPU the sample whose index is the same
# as its rank. Technically we only need to increment the seed at the end of an
# epoch, but there's no harm in doing it more often.
rngstate = random.getstate()
self._seed += 1
random.seed(self._seed)
for i in range(min(self._world_size, sum(self._weights))):
sample = self.sample_one_batch()
if i == self._rank:
batches.append(sample)
random.setstate(rngstate)
else:
batches.append(self.sample_one_batch())
if all(w == 0 for w in self._weights): # early stopping
if len(batches) > 0:
yield self._batch_postprocessor(batches)
return
assert len(batches) > 0
yield self._batch_postprocessor(batches)
def __len__(self) -> int:
return self._total_len
| better-promptability-main | better_promptability/data/mixer_dataloader.py |
def test_hello():
print("Hello, World!")
| better-promptability-main | tests/hello_test.py |
better-promptability-main | tests/__init__.py |
|
import os
from tango.common import Params
def test_few_shot_baseline_all():
os.environ["CKPT"] = "null"
d = Params.from_file("configs/fewshot_eval_all_green.jsonnet").as_dict()
del os.environ["CKPT"]
assert "result_anli_GPT_3_style_r1_score_eval" in d["steps"]
assert "aggregated_results" in d["steps"]
| better-promptability-main | tests/configs_test.py |
better-promptability-main | tests/models/__init__.py |
|
better-promptability-main | tests/steps/__init__.py |
|
from better_promptability.steps.process_story_cloze import ProcessStoryCloze
from better_promptability.common.testing import BetterPromptabilityTestCase
class ProcessStoryClozeTest(BetterPromptabilityTestCase):
def test_process_story_cloze(self):
step = ProcessStoryCloze()
result = step.run(
old_data_path=str(
self.FIXTURES_ROOT
/ "data"
/ "cache"
/ "story_cloze_2016_Story_Continuation_and_Options_score_eval"
),
new_data_path=str(
self.FIXTURES_ROOT
/ "data"
/ "processed_cache"
/ "story_cloze_2016_Story_Continuation_and_Options_score_eval"
),
process_if_exists=True,
)
assert len(result["train"]) == 28
assert len(result["train"][0]["targets"]) == 2
assert len(result["train"][0]["targets_pretokenized"]) == 2
assert len(result["train"][0]["is_correct"]) == 2
assert "validation" in result
assert "test" not in result
| better-promptability-main | tests/steps/process_story_cloze_test.py |
from better_promptability.steps.process_dataset import ProcessDataset
from better_promptability.common.testing import BetterPromptabilityTestCase
class ProcessDatasetTest(BetterPromptabilityTestCase):
def test_process_dataset(self):
step = ProcessDataset()
result = step.run(
old_data_path=str(
self.FIXTURES_ROOT / "data" / "cache" / "hellaswag_complete_first_then_score_eval"
),
new_data_path=str(
self.FIXTURES_ROOT
/ "data"
/ "processed_cache"
/ "hellaswag_complete_first_then_score_eval"
),
process_if_exists=True,
)
assert len(result["train"]) == 7
assert len(result["train"][0]["targets"]) == 4
assert len(result["train"][0]["targets_pretokenized"]) == 4
assert len(result["train"][0]["is_correct"]) == 4
| better-promptability-main | tests/steps/process_dataset_test.py |
import pytest
from transformers.models import t5 as hf_t5
from better_promptability.modules.transformer import Transformer
@pytest.fixture(scope="module")
def model_name():
return "google/t5-small-lm-adapt"
@pytest.fixture(scope="module")
def tokenizer(model_name):
return hf_t5.T5Tokenizer.from_pretrained(model_name)
@pytest.mark.parametrize(
"task",
[
"seq2seq-lm",
],
)
def test_transformer(task: str, model_name: str, tokenizer: hf_t5.T5Tokenizer):
model = Transformer(model_name, task=task)
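    # T5's span-corruption format: <extra_id_N> sentinel tokens mark masked
    # spans in the input, and the labels spell out those spans in order.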
input_ids = tokenizer(
["The <extra_id_0> walks in <extra_id_1> park", "The <extra_id_0> barked"],
return_tensors="pt",
padding=True,
).input_ids
assert input_ids.tolist() == [
[37, 32099, 10681, 16, 32098, 2447, 1],
[37, 32099, 1207, 5100, 1, 0, 0],
]
attention_mask = ~(input_ids == 0)
labels = tokenizer(
["<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", "<extra_id_0> dog"],
return_tensors="pt",
padding=True,
).input_ids
assert labels.tolist() == [
[32099, 5295, 1782, 32098, 8, 32097, 1],
[32099, 1782, 1, 0, 0, 0, 0],
]
decoder_attention_mask = ~(labels == 0)
output = model.forward(
input_ids,
attention_mask=attention_mask,
labels=labels,
decoder_attention_mask=decoder_attention_mask,
)
assert output.logits is not None
| better-promptability-main | tests/modules/transformer_test.py |
better-promptability-main | tests/modules/__init__.py |
|
better-promptability-main | tests/data/__init__.py |
|
from better_promptability.data.config import Config
from better_promptability.data import T0Module
from better_promptability.common.testing import BetterPromptabilityTestCase
class T0ModuleTest(BetterPromptabilityTestCase):
def test_t0_module_green(self):
t0 = T0Module(
config=Config(),
data_dir=str(self.FIXTURES_ROOT / "data"),
num_prefix=1,
transformer_model="google/t5-small-lm-adapt",
mixture_name="green",
task_name="hellaswag_complete_first_then_score_eval",
t0_data_cache=str(self.FIXTURES_ROOT / "data" / "processed_cache"),
)
t0.setup()
data = t0.load()
assert "train" in data
train_batch = list(t0.train_dataloader())[0]
assert train_batch["target_ids"].dim() == 2
val_batch = list(t0.val_dataloader()[0])[0]
assert val_batch["target_ids"].dim() == 3
def test_t0_module_green_story_cloze(self):
# Story_cloze special case.
t0 = T0Module(
config=Config(),
data_dir=str(self.FIXTURES_ROOT / "data"),
num_prefix=1,
transformer_model="google/t5-small-lm-adapt",
mixture_name="green",
task_name="story_cloze_2016_Story_Continuation_and_Options_score_eval",
t0_data_cache=str(self.FIXTURES_ROOT / "data" / "processed_cache"),
)
t0.setup()
data = t0.load()
assert "train" in data
train_batch = list(t0.train_dataloader())[0]
assert train_batch["target_ids"].dim() == 2
val_batch = list(t0.val_dataloader()[0])[0]
assert val_batch["target_ids"].dim() == 3
def test_t0_module_d4_train(self):
t0 = T0Module(
config=Config(),
data_dir=str(self.FIXTURES_ROOT / "data"),
num_prefix=1,
transformer_model="google/t5-small-lm-adapt",
mixture_name="d4_train",
task_name="adversarial_qa_dbert_based_on",
t0_data_cache=str(self.FIXTURES_ROOT / "data" / "cache"),
)
t0.setup()
data = t0.load()
assert "train" in data
train_batch = list(t0.train_dataloader())[0]
assert train_batch["target_ids"].dim() == 2
val_batch = list(t0.val_dataloader()[0])[0]
assert val_batch["target_ids"].dim() == 2
def test_t0_module_d4_dev(self):
t0 = T0Module(
config=Config(),
data_dir=str(self.FIXTURES_ROOT / "data"),
num_prefix=1,
transformer_model="google/t5-small-lm-adapt",
mixture_name="d4_dev",
task_name="openbookqa_main_choices",
t0_data_cache=str(self.FIXTURES_ROOT / "data" / "cache"),
)
t0.setup()
data = t0.load()
assert "train" in data
train_batch = list(t0.train_dataloader())[0]
assert train_batch["target_ids"].dim() == 2
val_batch = list(t0.val_dataloader()[0])[0]
assert val_batch["target_ids"].dim() == 3
| better-promptability-main | tests/data/t0_data_module_test.py |
import pytest
from better_promptability.data.mixer_dataset import MixerDataset
@pytest.fixture
def datasets():
return [["a1", "a2", "a3"], ["b1", "b2", "b3", "b4", "b5", "b6", "b7"]]
def test_mixer_dataset(datasets):
mixer = MixerDataset(datasets)
assert len(mixer) == 10
assert [x for x in mixer] == [x for dataset in datasets for x in dataset]
def test_mixer_dataset_with_size_limit(datasets):
mixer = MixerDataset(datasets, sampling_cap=3)
assert len(mixer) == 6
assert [x for x in mixer][:3] == ["a1", "a2", "a3"]
for x in [x for x in mixer][3:]:
assert x in datasets[1]
# Make sure we get to all instances in all datasets if we call `resample` enough times.
seen = set(iter(mixer))
for _ in range(2):
mixer.resample()
for x in mixer:
seen.add(x)
assert seen == set((x for dataset in datasets for x in dataset))
| better-promptability-main | tests/data/mixer_dataset_test.py |
import random
import sys
from tqdm import tqdm
random.seed(100)
TASKS_METADATA = [ # task name, num templates, random performance
("ANLI", 45, 1/3),
("Hellaswag", 4, 1/4),
("StoryCloze", 5, 1/2),
("CB", 15, 1/3),
("COPA", 12, 1/2),
("RTE", 10, 1/2),
("WIC", 10, 1/2),
("WSC", 10, 1/2),
("Winogrande", 5, 1/2),
]
NUM_INSTANCES = [1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,1000,1000,1200,10042,10042,10042,10042,1871,1871,1871,1871,1871,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,48,100,100,100,100,100,100,100,100,48,52,52,277,277,277,277,277,277,277,277,277,277,638,638,638,638,638,638,638,638,638,638,104,104,104,104,104,104,104,104,104,104,1267,1267,1267,1267,1267]
RANDOM_SCORES = [e for metadata in TASKS_METADATA for e in [metadata[2]] * metadata[1]]
XL_T0REPRO_DEEP = [0.382999986410141,0.3400000035762787,0.32499998807907104,0.3630000054836273,0.3160000145435333,0.398333340883255,0.39500001072883606,0.3569999933242798,0.40583333373069763,0.34299999475479126,0.3619999885559082,0.4008333384990692,0.3720000088214874,0.34299999475479126,0.3166666626930237,0.37599998712539673,0.3630000054836273,0.3958333432674408,0.34599998593330383,0.35499998927116394,0.40416666865348816,0.3709999918937683,0.3479999899864197,0.36916667222976685,0.3400000035762787,0.35899999737739563,0.3408333361148834,0.4009999930858612,0.34200000762939453,0.36916667222976685,0.3659999966621399,0.37599998712539673,0.41083332896232605,0.375,0.3440000116825104,0.398333340883255,0.3930000066757202,0.3310000002384186,0.3824999928474426,0.38499999046325684,0.36500000953674316,0.3916666805744171,0.35600000619888306,0.34700000286102295,0.4258333444595337,0.28719377517700195,0.2502489686012268,0.28779128193855286,0.2903803884983063,0.9182255268096924,0.911811888217926,0.9262426495552063,0.9160876274108887,0.9203634262084961,0.8214285969734192,0.8392857313156128,0.6785714030265808,0.7857142686843872,0.8214285969734192,0.8035714030265808,0.7678571343421936,0.7857142686843872,0.6785714030265808,0.8392857313156128,0.8035714030265808,0.8214285969734192,0.8571428656578064,0.8035714030265808,0.5714285969734192,0.875,0.7900000214576721,0.7200000286102295,0.7900000214576721,0.8500000238418579,0.8100000023841858,0.7799999713897705,0.8299999833106995,0.8299999833106995,0.9166666865348816,0.7692307829856873,0.7692307829856873,0.8122743964195251,0.7653429508209229,0.8050541281700134,0.833935022354126,0.8050541281700134,0.7870036363601685,0.7689530849456787,0.7833935022354126,0.7761732935905457,0.7797833681106567,0.5470219254493713,0.5250783562660217,0.5297805666923523,0.554858922958374,0.5423197746276855,0.5313479900360107,0.5595611333847046,0.5250783562660217,0.5329153537750244,0.5423197746276855,0.49038460850715637,0.5288461446762085,0.4423076808452606,0.5192307829856873,0.6538461446762085,0.682692289352417,0.5480769276618958,0.5480769276618958,0.567307710647583,0.5769230723381042,0.5406472086906433,0.5548539757728577,0.5390686392784119,0.518547773361206,0.5351223349571228]
XL_MTL_DEEP = [0.36000001430511475,0.36000001430511475,0.335833340883255,0.4129999876022339,0.33399999141693115,0.4099999964237213,0.41600000858306885,0.3499999940395355,0.3916666805744171,0.33000001311302185,0.36500000953674316,0.3916666805744171,0.3540000021457672,0.34299999475479126,0.3241666555404663,0.39899998903274536,0.3930000066757202,0.39500001072883606,0.40299999713897705,0.375,0.4099999964237213,0.37700000405311584,0.34200000762939453,0.3708333373069763,0.35899999737739563,0.36500000953674316,0.3774999976158142,0.41600000858306885,0.35600000619888306,0.36666667461395264,0.36500000953674316,0.3619999885559082,0.4050000011920929,0.3799999952316284,0.36399999260902405,0.3841666579246521,0.4020000100135803,0.32499998807907104,0.3933333456516266,0.40799999237060547,0.3799999952316284,0.37833333015441895,0.3709999918937683,0.36899998784065247,0.4074999988079071,0.30860385298728943,0.26986655592918396,0.29247161746025085,0.29784902930259705,0.920897901058197,0.9326563477516174,0.9321218729019165,0.921432375907898,0.9203634262084961,0.8035714030265808,0.8214285969734192,0.8214285969734192,0.8035714030265808,0.7678571343421936,0.7857142686843872,0.8571428656578064,0.8392857313156128,0.7857142686843872,0.8392857313156128,0.75,0.8035714030265808,0.8214285969734192,0.8571428656578064,0.75,0.7291666865348816,0.8100000023841858,0.7099999785423279,0.7699999809265137,0.800000011920929,0.800000011920929,0.7599999904632568,0.8299999833106995,0.8100000023841858,0.7708333134651184,0.75,0.7307692170143127,0.7797833681106567,0.7292418479919434,0.7039711475372314,0.7942238450050354,0.7328519821166992,0.6931408047676086,0.7039711475372314,0.750902533531189,0.6859205961227417,0.7220216393470764,0.5768024921417236,0.5219435691833496,0.5454545617103577,0.5877742767333984,0.5877742767333984,0.5611284971237183,0.5203761458396912,0.5094043612480164,0.568965494632721,0.5626959204673767,0.4711538553237915,0.5769230723381042,0.5192307829856873,0.6346153616905212,0.6346153616905212,0.6538461446762085,0.4711538553237915,0.4711538553237915,0.5,0.5769230723381042,0.5217047929763794,0.5611681342124939,0.5730071067810059,0.5493291020393372,0.5603788495063782]
assert all(len(RANDOM_SCORES) == len(NUM_INSTANCES) == sum(m[1] for m in TASKS_METADATA) == len(l) for k, l in globals().items() if any(k.startswith(p) for p in ("LARGE", "XL", "T0")))
def avg(l):
return sum(l) / len(l)
def macro_avg(l):
per_task = []
for _, num_prompts, _ in TASKS_METADATA:
per_task.append(avg(l[:num_prompts]))
l = l[num_prompts:]
assert len(l) == 0
return avg(per_task)
def arg(results):
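    # Convert raw per-prompt results into relative gain over each task's
    # random baseline, then macro-average across tasks (see macro_avg above).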
assert len(RANDOM_SCORES) == len(results)
scores = [sum(r) / num for r, num in zip(results, NUM_INSTANCES)]
rgs = [(score - baseline) / baseline for baseline, score in zip(RANDOM_SCORES, scores)]
return macro_avg(rgs)
def pairwise_test(worse_scores, better_scores):
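    # Paired bootstrap significance test: reconstruct per-instance 0/1
    # outcomes from the reported accuracies, resample (worse, better) outcome
    # pairs with replacement, and estimate how often the better system's ARG
    # fails to beat the worse system's.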
worse_n_correct = [round(score * num) for score, num in zip(worse_scores, NUM_INSTANCES)]
better_n_correct = [round(score * num) for score, num in zip(better_scores, NUM_INSTANCES)]
worse_results = [[1] * n_correct + [0] * (num - n_correct) for n_correct, num in zip(worse_n_correct, NUM_INSTANCES)]
better_results = [[1] * n_correct + [0] * (num - n_correct) for n_correct, num in zip(better_n_correct, NUM_INSTANCES)]
print(f"Original ARG: worse {arg(worse_results)}, better {arg(better_results)}")
arg_diffs = []
for _ in tqdm(range(1000)):
new_worse_results = []
new_better_results = []
for worse, better in zip(worse_results, better_results):
new_worse, new_better = zip(*random.choices(list(zip(worse, better)), k=len(worse)))
new_worse_results.append(new_worse)
new_better_results.append(new_better)
worse_arg = arg(new_worse_results)
better_arg = arg(new_better_results)
arg_diffs.append(better_arg - worse_arg)
print(f"arg p: {avg([d < 0 for d in arg_diffs])}")
def main():
pairwise_test(XL_T0REPRO_DEEP, XL_MTL_DEEP)
if __name__ == "__main__":
    main()
| better-promptability-main | scripts/bootstrap.py |
import logging
import os
import sys
from tango.common import Params
from tqdm import tqdm
from better_promptability.steps.process_dataset import ProcessDataset
from better_promptability.steps.process_story_cloze import ProcessStoryCloze
logging.basicConfig(level=logging.INFO)
def process_green_datasets(old_base_path, new_base_path):
datasets = Params.from_file("configs/t0_mixtures.jsonnet")["green"]
for dataset in tqdm(datasets):
dataset = dataset.strip()
if "story_cloze" not in dataset:
step = ProcessDataset()
else:
step = ProcessStoryCloze()
try:
step.run(
old_data_path=os.path.join(old_base_path, dataset),
new_data_path=os.path.join(new_base_path, dataset),
)
except KeyError:
print(f"error in {dataset}")
if __name__ == "__main__":
process_green_datasets(sys.argv[1], sys.argv[2])
| better-promptability-main | scripts/process_green_datasets.py |
"""
Download all of the data from the [bigscience/P3](https://huggingface.co/datasets/bigscience/P3)
dataset corresponding to a particular mixture. This script should only be run from the root of this repository.
"""
import importlib
import json
import os
import sys
from pathlib import Path
import datasets
from tango.common import Params
from tango.common.file_lock import FileLock
from tqdm import tqdm
STORY_CLOZE_PATH = Path("/data/cl/user/zfw/story_cloze_dir")
def main(mixture_name: str, cache_dir: str):
cache_dir = Path(cache_dir)
def download_task_dataset(task_name: str):
local_path = cache_dir / task_name # type: ignore
if not os.path.isdir(local_path) or not os.listdir(local_path):
if task_name.startswith("story_cloze_"):
data_dir = STORY_CLOZE_PATH / task_name
# Hack to add story cloze to the config in the P3 dataset builder -- import it first
# and change relevant data structures
dataset_module = datasets.load.dataset_module_factory(
"bigscience/P3",
revision=None,
download_config=None,
download_mode=None,
data_files=None,
)
p3_module = importlib.import_module(dataset_module.module_path)
# Mostly following https://huggingface.co/datasets/bigscience/P3/blob/main/P3.py
task_splits_and_features = p3_module._TASK_SPLITS_AND_FEATURES_DICT # type: ignore
assert task_name not in task_splits_and_features
for split_name in ("validation", "test"): # story cloze has no training set
split_info = json.load(open(data_dir / f"info.{split_name}.json"))
features_dict = split_info["features"]
assert split_info["num_shards"] == 1
if task_name not in task_splits_and_features:
task_splits_and_features[task_name] = {
"splits": [],
"features_dict": features_dict,
}
task_splits_and_features[task_name]["splits"].append(split_name)
assert features_dict == task_splits_and_features[task_name]["features_dict"]
splits_and_features_dict = task_splits_and_features[task_name]
assert task_name not in p3_module._URLs # type: ignore
p3_module._URLs[task_name] = { # type: ignore
split_name: {"tfrecord": data_dir / f"{split_name}.tfrecord-00000-of-00001"}
for split_name in splits_and_features_dict["splits"]
}
p3_module.P3.BUILDER_CONFIGS.append( # type: ignore
p3_module.P3Config( # type: ignore
name=task_name,
splits=splits_and_features_dict["splits"],
features_dict=splits_and_features_dict["features_dict"],
score_eval=task_name.endswith("score_eval"),
)
)
p3_module.P3.builder_configs = { # type: ignore
config.name: config for config in p3_module.P3.BUILDER_CONFIGS # type: ignore
}
retries = 0
while True:
try:
dataset = datasets.load_dataset("bigscience/P3", task_name)
break
except ConnectionError:
retries += 1
if retries > 3:
raise
with FileLock(str(local_path) + ".lock"):
dataset.save_to_disk(local_path)
tasks = Params.from_file("configs/t0_mixtures.jsonnet")[mixture_name]
for task in tqdm(tasks):
download_task_dataset(task)
if __name__ == "__main__":
main(*sys.argv[1:])
| better-promptability-main | scripts/download_t0_training_set.py |
"""
Subsamples the training set for each dataset (i.e., for all templates).
Ideally we want to sample the same examples across templates for a given dataset, but unfortunately
this is impossible since the P3 dataset cache does not guarantee the same example order across
templates. Check out, for example, hellaswag_complete_first_then_score_eval[29372] and
hellaswag_Predict_ending_with_hint_score_eval[29372].
"""
from pathlib import Path
import pickle
import sys
import random
from tqdm import tqdm
sys.path.append(str(Path(__file__).parent.parent.absolute()))
from better_promptability.data.config import Config # noqa: E402
from better_promptability.data.data_utils import md5 # noqa: E402
from better_promptability.data.t0_mixture import T0Mixture # noqa: E402
def main(mixture_name, n_shot, seed, output_file):
n_shot = int(n_shot)
seed = int(seed)
random.seed(seed)
# All arguments apart from the first two are dummy
mixture = T0Mixture(
mixture_name=mixture_name,
t0_data_cache="/data/cl/user/zfw/better-promptability/t0_cache/",
config=Config(),
data_dir="tmp",
num_prefix=20,
transformer_model="t5-base",
)
taskname_to_indices = {}
for data_module in tqdm(mixture.data_modules.values()):
task_name = data_module.task_name
dataset_dict = data_module.load()
train_split = dataset_dict[data_module.train_split]
total_len = len(train_split)
print(f"Sampling {n_shot} examples from {total_len} for {task_name} with seed {seed}")
indices = random.sample(range(total_len), n_shot)
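        # Checksum the sampled examples so downstream consumers can verify that
        # they load exactly this few-shot subset (per the module docstring,
        # example order is not guaranteed to match across templates).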
checksum = md5(
"".join(str(train_split[i]["inputs"] + train_split[i]["targets"]) for i in indices)
)
taskname_to_indices[task_name] = (indices, checksum)
pickle.dump(taskname_to_indices, open(output_file, "wb"))
if __name__ == "__main__":
main(*sys.argv[1:]) # pylint: disable=no-value-for-parameter
| better-promptability-main | scripts/subsample_t0_training_set.py |
import sys
import os
sys.path.append(os.path.abspath(os.path.join("..", "nla_semparse")))
from nla_semparse.nla_metric import NlaMetric
def test_metric_basic():
metric = NlaMetric()
metric(["2"], ["2"])
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 1.0,
}
metric.reset()
def test_metric_one_operation():
metric = NlaMetric()
metric(["(add 2 3)"], ["(add 2 3)"])
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 1.0,
}
metric.reset()
metric(["(add 2 3)"], ["5"])
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(["(add 2 3)"], ["(add 1 4)"])
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(["(add 2 3)"], ["(subtract 1 4)"])
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
metric.reset()
def test_metric_ill_formed_sequences():
metric = NlaMetric()
metric(["(add 2)"], ["(add 2 3)"])
assert metric.get_metric() == {
"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(["(add 2))"], ["(add 2 3)"])
assert metric.get_metric() == {
"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(["()"], ["(add 2 3)"])
assert metric.get_metric() == {
"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
metric.reset()
def test_metric_real_cases():
predictions1 = [
"(subtract (multiply (((((((((())))))",
"(subtract (add ((multiply (((()))))))))",
]
predictions2 = ["9", "9"]
predictions3 = ["(subtract (multiply (((((((((())))))", "9"]
targets = [
"(add (add (multiply 5 2) (divide 2 7)) (add (add 7 7) (multiply 3 (multiply 6 6))))",
"(subtract (add 8 7) (subtract (add (add 6 (divide 7 7)) 7) (multiply (divide 5 4) 8)))",
]
metric = NlaMetric()
metric(predictions1, targets)
assert metric.get_metric() == {
"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(predictions2, targets)
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 0.5,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(predictions3, targets)
assert metric.get_metric() == {
"well_formedness": 0.5,
"denotation_accuracy": 0.5,
"sequence_accuracy": 0.0,
}
metric.reset()
metric(targets, targets)
assert metric.get_metric() == {
"well_formedness": 1.0,
"denotation_accuracy": 1.0,
"sequence_accuracy": 1.0,
}
metric.reset()
| allennlp-guide-master | nla_semparse/tests/nla_metric_test.py |
allennlp-guide-master | nla_semparse/nla_semparse/__init__.py |
|
from allennlp_semparse import DomainLanguage, predicate
class NlaLanguage(DomainLanguage):
def __init__(self):
super().__init__(
start_types={int},
allowed_constants={
"0": 0,
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9,
},
)
@predicate
def add(self, num1: int, num2: int) -> int:
return num1 + num2
@predicate
def subtract(self, num1: int, num2: int) -> int:
return num1 - num2
@predicate
def multiply(self, num1: int, num2: int) -> int:
return num1 * num2
@predicate
def divide(self, num1: int, num2: int) -> int:
return num1 // num2 if num2 != 0 else 0
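# Usage sketch (not part of the original module): `execute` is inherited from
# DomainLanguage and evaluates prefix-notation logical forms, e.g.:
#     language = NlaLanguage()
#     language.execute("(add 2 3)")                    # -> 5
#     language.execute("(subtract (multiply 7 3) 2)")  # -> 19
#     language.execute("(divide 5 0)")                 # -> 0 (by definition above)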
| allennlp-guide-master | nla_semparse/nla_semparse/nla_language.py |
from typing import Dict, List, Optional
from allennlp.training.metrics.metric import Metric
from allennlp_semparse.domain_languages.domain_language import ExecutionError
from .nla_language import NlaLanguage
@Metric.register("nla_metric")
class NlaMetric(Metric):
"""
Metric for evaluating prefix arithmetic sequences against targets, useful for Natural Language Arithmetic
    parsing. This metric evaluates predicted sequences on three things: 1) whether the predicted sequence is a
    well-formed prefix arithmetic expression, 2) whether the predicted sequence and the target sequence evaluate
to the same value, 3) whether the predicted sequence and the target sequence are identical.
"""
def __init__(self):
self._language = NlaLanguage()
self._num_well_formed = 0
self._num_correct_denotation = 0
self._num_same_sequence = 0
self._num_all_sequences = 0
def __call__(self, predictions, targets) -> None:
for prediction, target in zip(predictions, targets):
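            # Predictions may arrive as token lists; normalize both sides to
            # the compact string form the executor expects, e.g.
            # ["(", "add", "2", "3", ")"] -> "(add 2 3)".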
if isinstance(prediction, list):
prediction = " ".join(prediction).replace("( ", "(").replace(" )", ")")
target = " ".join(target).replace("( ", "(").replace(" )", ")")
if isinstance(prediction, str) and not prediction.startswith("("):
prediction = f"({prediction})"
if isinstance(target, str) and not target.startswith("("):
target = f"({target})"
evaluated_prediction = None
evaluated_target = None
try:
evaluated_target = self._language.execute(target)
evaluated_prediction = self._language.execute(prediction)
except (TypeError, ExecutionError, IndexError):
pass
if isinstance(evaluated_prediction, int):
self._num_well_formed += 1
if evaluated_prediction == evaluated_target:
self._num_correct_denotation += 1
if prediction == target:
self._num_same_sequence += 1
self._num_all_sequences += 1
def get_metric(self, reset: bool = False) -> Dict[str, float]:
if self._num_all_sequences == 0:
metrics = {
"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
else:
metrics = {
"well_formedness": self._num_well_formed / self._num_all_sequences,
"denotation_accuracy": self._num_correct_denotation
/ self._num_all_sequences,
"sequence_accuracy": self._num_same_sequence / self._num_all_sequences,
}
if reset:
self.reset()
return metrics
def reset(self):
self._num_well_formed = 0
self._num_same_sequence = 0
self._num_correct_denotation = 0
self._num_all_sequences = 0
| allennlp-guide-master | nla_semparse/nla_semparse/nla_metric.py |
import sys
import os
import random
import math
import argparse
from typing import List, Dict, Any
sys.path.append(os.path.abspath(os.path.join("..", "nla_semparse")))
from nla_semparse.nla_language import NlaLanguage
class DataGenerator:
"""
Generator for data points for natural language arithmetic.
"""
def __init__(self):
self.language = NlaLanguage()
self.numbers = [
{"meaning": "0", "translation": "zero"},
{"meaning": "1", "translation": "one"},
{"meaning": "2", "translation": "two"},
{"meaning": "3", "translation": "three"},
{"meaning": "4", "translation": "four"},
{"meaning": "5", "translation": "five"},
{"meaning": "6", "translation": "six"},
{"meaning": "7", "translation": "seven"},
{"meaning": "8", "translation": "eight"},
{"meaning": "9", "translation": "nine"},
]
# The order below defines precedence (in ascending order).
self.operators = [
{"meaning": "subtract", "translation": "minus"},
{"meaning": "add", "translation": "plus"},
{"meaning": "multiply", "translation": "times"},
{"meaning": "divide", "translation": "over"},
]
def generate_expression(
self, num_operations: int, allowed_operators: List[Dict] = None
):
"""
Generates a single expression that contains the given number of operations.
"""
if num_operations == 0:
return random.sample(self.numbers, 1)[0]
# Expressions will be of the type (OP EXP1 EXP2)
if allowed_operators is None:
allowed_operators = self.operators
operator_index = random.randint(0, len(allowed_operators) - 1)
operator = allowed_operators[operator_index]
# Decide how many operators will be in each of EXP1 and EXP2
random_value = random.random()
num_operations_for_first = int(num_operations * random_value)
num_operations_for_second = num_operations - num_operations_for_first - 1
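        # One operation is consumed by this node's operator, hence the -1.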
# The operations in the children will be the same as the operator already sampled, or one of a higher
# precedence.
first_argument = self.generate_expression(
num_operations_for_first, allowed_operators[operator_index:]
)
second_argument = self.generate_expression(
num_operations_for_second, allowed_operators[operator_index:]
)
meaning_representation_parts = [
operator["meaning"],
first_argument["meaning"],
second_argument["meaning"],
]
meaning_representation = "(" + " ".join(meaning_representation_parts) + ")"
return {
"meaning": meaning_representation,
"translation": " ".join(
[
first_argument["translation"],
operator["translation"],
second_argument["translation"],
]
),
"denotation": self.language.execute(meaning_representation),
}
def generate_data(
self,
num_expressions: int,
min_num_operations: int = 1,
max_num_operations: int = 10,
split_data: bool = False,
train_proportion: float = 0.8,
test_proportion: float = 0.1,
):
"""
        Returns ``num_expressions`` expressions, each containing a number of operations in the range
        ``(min_num_operations, max_num_operations)``. Optionally, you can also have the data split into
        train, test, and dev sets, and specify their proportions.
"""
data: List[Dict[str, Any]] = []
while len(data) < num_expressions:
num_operations = random.randint(min_num_operations, max_num_operations)
try:
expression = self.generate_expression(num_operations)
data.append(expression)
except ZeroDivisionError:
pass
if not split_data:
return {"data": data}
test_size = math.ceil(test_proportion * num_expressions)
if train_proportion + test_proportion < 1.0:
dev_size = math.ceil(
(1 - (train_proportion + test_proportion)) * num_expressions
)
else:
dev_size = 0
return {
"test": data[:test_size],
"dev": data[test_size : test_size + dev_size],
"train": data[test_size + dev_size :],
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--num-expressions",
type=int,
required=True,
dest="num_expressions",
help="Number of expressions to generate",
)
parser.add_argument(
"--min-num-operations", type=int, dest="min_num_operations", default=1
)
parser.add_argument(
"--max-num-operations", type=int, dest="max_num_operations", default=10
)
parser.add_argument(
"--output",
type=str,
required=True,
help="""Location where output will be written. If splitting data, the name of the split
will be appended to the file name""",
)
parser.add_argument("--split-data", action="store_true", dest="split")
parser.add_argument(
"--train-proportion",
type=float,
dest="train_proportion",
help="How big should the train split be? (Between 0 and 1)",
)
parser.add_argument(
"--test-proportion",
type=float,
dest="test_proportion",
help="""How big should the test split be? (Between 0 and 1). Will also make a dev split
if train_proportion + test_proportion < 1""",
)
parser.add_argument(
"--no-meaning",
action="store_true",
dest="no_meaning",
help="Generated data will have denotations instead of meaning",
)
args = parser.parse_args()
if args.no_meaning:
raise NotImplementedError
data_generator = DataGenerator()
data = data_generator.generate_data(
num_expressions=args.num_expressions,
min_num_operations=args.min_num_operations,
max_num_operations=args.max_num_operations,
split_data=args.split,
train_proportion=args.train_proportion,
test_proportion=args.test_proportion,
)
if args.split:
filename_parts = args.output.split(".")
assert (
len(filename_parts) == 2
), "Cannot decide how to alter the file name. Expected just one ."
train_file_name = f"{filename_parts[0]}_train.{filename_parts[1]}"
dev_file_name = f"{filename_parts[0]}_dev.{filename_parts[1]}"
test_file_name = f"{filename_parts[0]}_test.{filename_parts[1]}"
with open(train_file_name, "w") as output_file:
for datum in data["train"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
with open(dev_file_name, "w") as output_file:
for datum in data["dev"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
with open(test_file_name, "w") as output_file:
for datum in data["test"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
else:
with open(args.output, "w") as output_file:
for datum in data["data"]:
source = datum["translation"]
target = datum["meaning"].replace("(", "( ").replace(")", " )")
print(f"{source}\t{target}", file=output_file)
if __name__ == "__main__":
main()
| allennlp-guide-master | nla_semparse/scripts/generate_data.py |
# Inputs
text: TextField
title: TextField
stars: LabelField
# Outputs
aspect: LabelField
sentiment: LabelField
| allennlp-guide-master | exercises/chapter05/input_output/add_list_source.py |
# Inputs
text: TextField
title: TextField
# Outputs
sentiment: LabelField
| allennlp-guide-master | exercises/chapter05/input_output/add_stars_source.py |
# Inputs
text: TextField
title: TextField
# Outputs
sentiment: LabelField
| allennlp-guide-master | exercises/chapter05/input_output/add_title_solution.py |
# Inputs
text: TextField
# Outputs
sentiment: LabelField
| allennlp-guide-master | exercises/chapter05/input_output/add_title_source.py |
# Inputs
text: TextField
title: TextField
stars: LabelField
aspect: LabelField # either here
# Outputs
sentiment: LabelField
aspect: LabelField # or here
| allennlp-guide-master | exercises/chapter05/input_output/add_aspect_solution.py |
# Inputs
text: TextField
title: TextField
stars: LabelField
# Outputs
aspect: List[LabelField]
sentiment: List[LabelField] # or a SequenceLabelField that depends on `aspect`
| allennlp-guide-master | exercises/chapter05/input_output/add_list_solution.py |
# Inputs
text: TextField
title: TextField
stars: LabelField
# OR stars: ArrayField, if you want to model the numerical value
# Outputs
sentiment: LabelField
| allennlp-guide-master | exercises/chapter05/input_output/add_stars_solution.py |
# Inputs
text: TextField
title: TextField
stars: LabelField
# Outputs
sentiment: LabelField
| allennlp-guide-master | exercises/chapter05/input_output/add_aspect_source.py |
from typing import Dict, Iterable
import json
import torch
from allennlp.common import Params
from allennlp.data import DatasetReader, Instance, Vocabulary
from allennlp.data.fields import LabelField, TextField
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.models import Model
from allennlp.modules import Embedding, Seq2VecEncoder, TextFieldEmbedder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
# Data will be formatted as:
# [title][tab][text][tab][stars][tab][aspect][tab][sentiment]
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
):
self.tokenizer = tokenizer or WordTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
text_field = TextField(
self.tokenizer.tokenize(text), self.token_indexers
)
sentiment_field = LabelField(sentiment)
fields = {"text": text_field, "sentiment": sentiment_field}
yield Instance(fields)
reader_params = """
{
"type": "classification-tsv",
"token_indexers": {"tokens": {"type": "single-id"}}
}
"""
reader = DatasetReader.from_params(Params(json.loads(reader_params)))
instances = reader.read("exercises/chapter05/input_output_reader/example_data.tsv")
print(instances)
vocab = Vocabulary.from_instances(instances)
print(vocab)
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
def forward(
self, text: Dict[str, torch.Tensor], label: torch.Tensor
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
return {"loss": loss, "probs": probs}
iterator = BasicIterator(batch_size=2)
iterator.index_with(vocab)
model_params = """
{
"type": "simple_classifier",
"embedder": {"token_embedders": {
"tokens": {"type": "embedding", "embedding_dim": 10}
}},
"encoder": {"type": "bag_of_embeddings"}
}
"""
model = Model.from_params(params=Params(json.loads(model_params)), vocab=vocab)
for batch in iterator(instances, num_epochs=1):
    outputs = model(**batch)
print(f"Model outputs: {outputs}")
| allennlp-guide-master | exercises/chapter05/putting_them_together/config_source.py |
from typing import Dict, Iterable
import torch
from allennlp.data import DatasetReader, Instance, Vocabulary
from allennlp.data.fields import LabelField, TextField
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WordTokenizer
from allennlp.models import Model
from allennlp.modules import Embedding, Seq2VecEncoder, TextFieldEmbedder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
from allennlp.nn import util
# Data will be formatted as:
# [title][tab][text][tab][stars][tab][aspect][tab][sentiment]
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(self):
self.tokenizer = WordTokenizer()
self.token_indexers = {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
text_field = TextField(
self.tokenizer.tokenize(text), self.token_indexers
)
sentiment_field = LabelField(sentiment)
fields = {"text": text_field, "sentiment": sentiment_field}
yield Instance(fields)
reader = ClassificationTsvReader()
instances = reader.read("exercises/chapter05/input_output_reader/example_data.tsv")
print(instances)
vocab = Vocabulary.from_instances(instances)
print(vocab)
@Model.register("simple_classifier")
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torch.nn.Linear(encoder.get_output_dim(), num_labels)
def forward(
self, text: Dict[str, torch.Tensor], label: torch.Tensor
) -> Dict[str, torch.Tensor]:
# Shape: (batch_size, num_tokens, embedding_dim)
embedded_text = self.embedder(text)
# Shape: (batch_size, num_tokens)
mask = util.get_text_field_mask(text)
# Shape: (batch_size, encoding_dim)
encoded_text = self.encoder(embedded_text, mask)
# Shape: (batch_size, num_labels)
logits = self.classifier(encoded_text)
# Shape: (batch_size, num_labels)
        probs = torch.nn.functional.softmax(logits, dim=-1)
# Shape: (1,)
loss = torch.nn.functional.cross_entropy(logits, label)
return {"loss": loss, "probs": probs}
iterator = BasicIterator(batch_size=2)
iterator.index_with(vocab)
embedding = Embedding(num_embeddings=vocab.get_vocab_size("tokens"), embedding_dim=10)
model = SimpleClassifier(
    vocab, BasicTextFieldEmbedder({"tokens": embedding}), BagOfEmbeddingsEncoder(embedding_dim=10)
)
for batch in iterator(instances, num_epochs=1):
    outputs = model(**batch)
print(f"Model outputs: {outputs}")
| allennlp-guide-master | exercises/chapter05/putting_them_together/code_source.py |
from typing import Iterable
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WordTokenizer
# Data will be formatted as:
# [title][tab][text][tab][stars][tab][aspect][tab][sentiment]
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(self):
self.tokenizer = WordTokenizer()
self.token_indexers = {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
text_field = TextField(
self.tokenizer.tokenize(text), self.token_indexers
)
sentiment_field = LabelField(sentiment)
fields = {"text": text_field, "sentiment": sentiment_field}
yield Instance(fields)
reader = ClassificationTsvReader()
instances = reader.read("exercises/chapter05/input_output_reader/example_data.tsv")
print(instances)
| allennlp-guide-master | exercises/chapter05/input_output_reader/add_fields_source.py |
from typing import Iterable
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WordTokenizer
# Data will be formatted as:
# [title][tab][text][tab][stars][tab][aspect][tab][sentiment]
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(self):
self.tokenizer = WordTokenizer()
self.token_indexers = {"tokens": SingleIdTokenIndexer()}
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
title, text, stars, aspect, sentiment = line.strip().split("\t")
title_field = TextField(
self.tokenizer.tokenize(title), self.token_indexers
)
text_field = TextField(
self.tokenizer.tokenize(text), self.token_indexers
)
stars_field = LabelField(stars)
aspect_field = LabelField(aspect)
sentiment_field = LabelField(sentiment)
fields = {
"title": title_field,
"text": text_field,
"stars": stars_field,
"aspect": aspect_field,
"sentiment": sentiment_field,
}
yield Instance(fields)
reader = ClassificationTsvReader()
print(reader.read("exercises/chapter05/input_output_reader/example_data.tsv"))
| allennlp-guide-master | exercises/chapter05/input_output_reader/add_fields_solution.py |
def test():
assert len(instances) == 2, "You didn't get two instances"
expected_fields = {"text", "title", "stars", "aspect", "sentiment"}
assert (
instances[0].fields.keys() == expected_fields
), "You don't have the right fields in your Instance"
    assert (
        instances[0]["sentiment"].label == "negative"
    ), "You didn't read the fields correctly"
    assert instances[0]["aspect"].label == "tutorials", "You didn't read the fields correctly"
    assert (
        instances[1]["sentiment"].label == "positive"
    ), "You didn't read the fields correctly"
    assert instances[1]["aspect"].label == "library", "You didn't read the fields correctly"
__msg__.good("Well done!")
| allennlp-guide-master | exercises/chapter05/input_output_reader/add_fields_test.py |
inputs = {"sentence": "a very well-made, funny and entertaining picture."}
archive = (
"https://storage.googleapis.com/allennlp-public-models/"
"basic_stanford_sentiment_treebank-2020.06.09.tar.gz"
)
predictor = Predictor.from_path(archive)
interpreter = SimpleGradient(predictor)
interpretation = interpreter.saliency_interpret_from_json(inputs)
print(interpretation)
| allennlp-guide-master | exercises/part3/interpret/saliency_source.py |
inputs = {"sentence": "a very well-made, funny and entertaining picture."}
archive = (
"https://storage.googleapis.com/allennlp-public-models/"
"basic_stanford_sentiment_treebank-2020.06.09.tar.gz"
)
predictor = Predictor.from_path(archive)
reducer = InputReduction(predictor) # or Hotflip(predictor)
# if it is Hotflip, we need an extra step: reducer.initialize()
reduced = reducer.attack_from_json(inputs, "tokens", "grad_input_1")
print(reduced)
| allennlp-guide-master | exercises/part3/interpret/attacker_source.py |
from allennlp.interpret.saliency_interpreters import SimpleGradient
from allennlp.predictors import Predictor
| allennlp-guide-master | exercises/part3/interpret/saliency_setup.py |
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.interpret.attackers import InputReduction
| allennlp-guide-master | exercises/part3/interpret/attacker_setup.py |
inputs = ["one plus three minus four", "five minus six times seven over one"]
for nla_input in inputs:
output = translate_nla(nla_input)
print(f"Input: {nla_input}")
print(f"Prediction: {output}\n")
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/predictor_source_medium.py |
import csv
from typing import Dict
from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/dataset_reader_setup.py |
class Seq2SeqDatasetReader(DatasetReader):
def __init__(
self,
source_tokenizer: Tokenizer = None,
target_tokenizer: Tokenizer = None,
source_token_indexers: Dict[str, TokenIndexer] = None,
target_token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._source_tokenizer = source_tokenizer or WhitespaceTokenizer()
self._target_tokenizer = target_tokenizer or self._source_tokenizer
self._source_token_indexers = source_token_indexers or {
"tokens": SingleIdTokenIndexer()
}
self._target_token_indexers = (
target_token_indexers or self._source_token_indexers
)
def _read(self, file_path: str):
with open(cached_path(file_path), "r") as data_file:
for line_num, row in enumerate(csv.reader(data_file, delimiter="\t")):
source_sequence, target_sequence = row
yield self.text_to_instance(source_sequence, target_sequence)
def text_to_instance(
self, source_string: str, target_string: str = None
) -> Instance:
tokenized_source = self._source_tokenizer.tokenize(source_string)
source_field = TextField(tokenized_source, self._source_token_indexers)
if target_string is not None:
tokenized_target = self._target_tokenizer.tokenize(target_string)
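            # Wrap the target with START_SYMBOL/END_SYMBOL so the decoder can
            # learn where sequences begin and end.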
tokenized_target.insert(0, Token(START_SYMBOL))
tokenized_target.append(Token(END_SYMBOL))
target_field = TextField(tokenized_target, self._target_token_indexers)
return Instance(
{"source_tokens": source_field, "target_tokens": target_field}
)
else:
return Instance({"source_tokens": source_field})
dataset_reader = Seq2SeqDatasetReader(
source_token_indexers={"tokens": SingleIdTokenIndexer(namespace="source_tokens")},
target_token_indexers={"tokens": SingleIdTokenIndexer(namespace="target_tokens")},
)
instances = list(
dataset_reader.read("nla_semparse/data/nla_with_meaning_rep_train.tsv")
)
for instance in instances[:10]:
print(instance)
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/dataset_reader_source.py |
from typing import List
from allennlp.predictors import Predictor
from allennlp.models.archival import load_archive
from allennlp_models.generation import (
ComposedSeq2Seq,
) # Need this for loading model archive
from nla_semparse.nla_semparse.nla_metric import (
NlaMetric,
) # Need this for loading model archive
archive = load_archive("nla_semparse/trained_models/seq2seq_model.tar.gz")
predictor = Predictor.from_archive(archive, "seq2seq")
def translate_nla(source: str) -> str:
prediction_data = predictor.predict_json({"source": source})
return " ".join(prediction_data["predicted_tokens"])
inputs = ["one plus three", "five minus six", "seven times two", "four over nine"]
for nla_input in inputs:
output = translate_nla(nla_input)
print(f"Input: {nla_input}")
print(f"Prediction: {output}\n")
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/predictor_source_easy.py |
from typing import Dict, List, Optional
from allennlp.training.metrics.metric import Metric
from allennlp_semparse.domain_languages.domain_language import ExecutionError
from nla_semparse.nla_semparse.nla_language import NlaLanguage
@Metric.register("nla_metric")
class NlaMetric(Metric):
"""
Metric for evaluating prefix arithmetic sequences against targets, useful for Natural Language Arithmetic
    parsing. This metric evaluates predicted sequences on three things: 1) whether the predicted sequence is a
    well-formed prefix arithmetic expression, 2) whether the predicted sequence and the target sequence evaluate
to the same value, 3) whether the predicted sequence and the target sequence are identical.
"""
def __init__(self):
self._language = NlaLanguage()
self._num_well_formed = 0
self._num_correct_denotation = 0
self._num_same_sequence = 0
self._num_all_sequences = 0
def __call__(self, predictions, targets) -> None:
for prediction, target in zip(predictions, targets):
if isinstance(prediction, list):
prediction = " ".join(prediction).replace("( ", "(").replace(" )", ")")
target = " ".join(target).replace("( ", "(").replace(" )", ")")
if isinstance(prediction, str) and not prediction.startswith("("):
prediction = f"({prediction})"
if isinstance(target, str) and not target.startswith("("):
target = f"({target})"
evaluated_prediction = None
evaluated_target = None
try:
evaluated_target = self._language.execute(target)
evaluated_prediction = self._language.execute(prediction)
except (TypeError, ExecutionError, IndexError):
pass
if isinstance(evaluated_prediction, int):
self._num_well_formed += 1
if evaluated_prediction == evaluated_target:
self._num_correct_denotation += 1
if prediction == target:
self._num_same_sequence += 1
self._num_all_sequences += 1
def get_metric(self, reset: bool = False) -> Dict[str, float]:
if self._num_all_sequences == 0:
metrics = {
"well_formedness": 0.0,
"denotation_accuracy": 0.0,
"sequence_accuracy": 0.0,
}
else:
metrics = {
"well_formedness": self._num_well_formed / self._num_all_sequences,
"denotation_accuracy": self._num_correct_denotation
/ self._num_all_sequences,
"sequence_accuracy": self._num_same_sequence / self._num_all_sequences,
}
if reset:
self.reset()
return metrics
def reset(self):
self._num_well_formed = 0
self._num_same_sequence = 0
self._num_correct_denotation = 0
self._num_all_sequences = 0
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/metric_setup.py |
from typing import List
from allennlp.predictors import Predictor
from allennlp.models.archival import load_archive
from allennlp_models.generation import (
ComposedSeq2Seq,
) # Need this for loading model archive
from nla_semparse.nla_semparse.nla_metric import (
NlaMetric,
) # Need this for loading model archive
archive = load_archive("nla_semparse/trained_models/seq2seq_model.tar.gz")
predictor = Predictor.from_archive(archive, "seq2seq")
def translate_nla(source: str) -> str:
prediction_data = predictor.predict_json({"source": source})
return " ".join(prediction_data["predicted_tokens"])
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/predictor_setup.py |
def evaluate(prediction: str, target: str) -> Dict[str, float]:
metric = NlaMetric()
metric([prediction], [target])
return metric.get_metric(reset=True)
target = "(subtract (multiply 7 3) 2)"
predictions = [
"(subtract (multiply 7 3) 2)",
"(subtract (multiply 6 4) 5)",
"subtract () add divide",
]
for prediction in predictions:
metrics = evaluate(prediction, target)
print(f"Prediction: {prediction}")
print(f"Target: {target}")
print(f"Well formedness: {metrics['well_formedness']}")
print(f"Accuracy: {metrics['sequence_accuracy']}\n")
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/metric_source.py |
inputs = [
"eight over nine times six minus three plus seven over five minus one",
"seven times eight plus five minus six plus one plus three plus two over seven",
]
for nla_input in inputs:
output = translate_nla(nla_input)
print(f"Input: {nla_input}")
print(f"Prediction: {output}\n")
| allennlp-guide-master | exercises/part3/semantic-parsing-seq2seq/predictor_source_hard.py |
from collections import Counter, defaultdict
from typing import Dict
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, LabelField, SequenceLabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/instances_setup.py |
from typing import Dict, Iterable
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import Field, LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Tokenizer, WhitespaceTokenizer
# You can implement your own dataset reader by subclassing DatasetReader.
# At the very least, you need to implement the _read() method, preferably
# text_to_instance() as well.
@DatasetReader.register("classification-tsv")
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer()
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_tokens = max_tokens
def text_to_instance(self, text: str, label: str = None) -> Instance:
tokens = self.tokenizer.tokenize(text)
if self.max_tokens:
tokens = tokens[: self.max_tokens]
text_field = TextField(tokens, self.token_indexers)
fields: Dict[str, Field] = {"text": text_field}
if label:
fields["label"] = LabelField(label)
return Instance(fields)
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, "r") as lines:
for line in lines:
text, sentiment = line.strip().split("\t")
yield self.text_to_instance(text, sentiment)
# Instantiate and use the dataset reader to read a file containing the data
reader = ClassificationTsvReader()
dataset = list(reader.read("quick_start/data/movie_review/train.tsv"))
print("type of its first element: ", type(dataset[0]))
print("size of dataset: ", len(dataset))
| allennlp-guide-master | exercises/part2/reading-data/dataset_reader_basic_source.py |