code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
def wrapped_callback(context_p, device_p, event, _):
assert addressof(context_p.contents) == addressof(
self.__context_p.contents), (context_p, self.__context_p)
device = USBDevice(
self,
device_p,
# pylint: disable=undefined-variable
event != HOTPLUG_EVENT_DEVICE_LEFT,
# pylint: enable=undefined-variable
)
self.__close_set.add(device)
unregister = bool(callback(
self,
device,
event,
))
if unregister:
del self.__hotplug_callback_dict[handle]
return unregister
handle = c_int()
callback_p = libusb1.libusb_hotplug_callback_fn_p(wrapped_callback)
mayRaiseUSBError(libusb1.libusb_hotplug_register_callback(
self.__context_p, events, flags, vendor_id, product_id, dev_class,
callback_p, None, byref(handle),
))
handle = handle.value
# Keep strong references
assert handle not in self.__hotplug_callback_dict, (
handle,
self.__hotplug_callback_dict,
)
self.__hotplug_callback_dict[handle] = (callback_p, wrapped_callback)
return handle | def hotplugRegisterCallback(
self, callback,
# pylint: disable=undefined-variable
events=HOTPLUG_EVENT_DEVICE_ARRIVED | HOTPLUG_EVENT_DEVICE_LEFT,
flags=HOTPLUG_ENUMERATE,
vendor_id=HOTPLUG_MATCH_ANY,
product_id=HOTPLUG_MATCH_ANY,
dev_class=HOTPLUG_MATCH_ANY,
# pylint: enable=undefined-variable
) | Registers a hotplug callback.
On success, returns an opaque value which can be passed to
hotplugDeregisterCallback.
Callback must accept the following positional arguments:
- this USBContext instance
- a USBDevice instance
If the device has left, configuration descriptors may not be
available, but its device descriptor will be available.
- event type, one of:
HOTPLUG_EVENT_DEVICE_ARRIVED
HOTPLUG_EVENT_DEVICE_LEFT
Callback must return whether it must be unregistered (any true value
to be unregistered, any false value to be kept registered).
Note: the given callback will be invoked during event handling, meaning
it cannot call any synchronous libusb function. | 2.893618 | 3.05529 | 0.947085 |
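A usage sketch of the hotplug API above, assuming the `usb1` module layout from python-libusb1 (`usb1.USBContext`, `usb1.HOTPLUG_EVENT_DEVICE_ARRIVED`, `handleEvents`); the callback body and the event loop are illustrative only:

```python
# Hedged sketch, assuming the usb1 module from python-libusb1.
import usb1

def on_hotplug(context, device, event):
    # Invoked during event handling, so it must not make synchronous libusb calls.
    arrived = event == usb1.HOTPLUG_EVENT_DEVICE_ARRIVED
    print('arrived' if arrived else 'left', device)
    return False  # a false value keeps the callback registered

with usb1.USBContext() as context:
    handle = context.hotplugRegisterCallback(on_hotplug)
    try:
        while True:
            context.handleEvents()  # drives the registered hotplug callbacks
    except KeyboardInterrupt:
        context.hotplugDeregisterCallback(handle)
```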
del self.__hotplug_callback_dict[handle]
libusb1.libusb_hotplug_deregister_callback(self.__context_p, handle) | def hotplugDeregisterCallback(self, handle) | Deregisters a hotplug callback.
handle (opaque)
Return value of a former hotplugRegisterCallback call. | 5.232378 | 6.202556 | 0.843584 |
arr = create_string_buffer(length)
return i2c_msg(
addr=address, flags=I2C_M_RD, len=length,
buf=arr) | def read(address, length) | Prepares an i2c read transaction.
:param address: Slave address.
:type address: int
:param length: Number of bytes to read.
:type length: int
:return: New :py:class:`i2c_msg` instance for read operation.
:rtype: :py:class:`i2c_msg` | 6.289278 | 5.839747 | 1.076978 |
if sys.version_info.major >= 3:
if type(buf) is str:
buf = bytes(map(ord, buf))
else:
buf = bytes(buf)
else:
if type(buf) is not str:
buf = ''.join([chr(x) for x in buf])
arr = create_string_buffer(buf, len(buf))
return i2c_msg(
addr=address, flags=0, len=len(arr),
buf=arr) | def write(address, buf) | Prepares an i2c write transaction.
:param address: Slave address.
:type address: int
:param buf: Bytes to write. Either list of values or str.
:type buf: list
:return: New :py:class:`i2c_msg` instance for write operation.
:rtype: :py:class:`i2c_msg` | 3.174531 | 2.961807 | 1.071822 |
n_msg = len(i2c_msg_instances)
msg_array = (i2c_msg * n_msg)(*i2c_msg_instances)
return i2c_rdwr_ioctl_data(
msgs=msg_array,
nmsgs=n_msg
) | def create(*i2c_msg_instances) | Factory method for creating an i2c_rdwr_ioctl_data struct that can
be called with ``ioctl(fd, I2C_RDWR, data)``.
:param i2c_msg_instances: Up to 42 i2c_msg instances
:rtype: i2c_rdwr_ioctl_data | 4.014223 | 2.922711 | 1.373459 |
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs() | def open(self, bus) | Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int | 4.004024 | 4.238814 | 0.944609 |
if self.fd:
os.close(self.fd)
self.fd = None | def close(self) | Close the i2c connection. | 3.765283 | 3.264997 | 1.153227 |
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force | def _set_address(self, address, force=None) | Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean | 2.986325 | 2.853611 | 1.046507 |
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value | def _get_funcs(self) | Returns a 32-bit value stating supported I2C functions.
:rtype: int | 8.86743 | 4.828413 | 1.83651 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=0, size=I2C_SMBUS_QUICK)
ioctl(self.fd, I2C_SMBUS, msg) | def write_quick(self, i2c_addr, force=None) | Perform quick transaction. Throws IOError if unsuccessful.
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean | 5.247968 | 5.919812 | 0.886509 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte | def read_byte(self, i2c_addr, force=None) | Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value | 5.489842 | 6.485455 | 0.846485 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg) | def write_byte(self, i2c_addr, value, force=None) | Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean | 5.229446 | 5.983099 | 0.874036 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte | def read_byte_data(self, i2c_addr, register, force=None) | Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int | 5.558371 | 6.560809 | 0.847208 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg) | def write_byte_data(self, i2c_addr, register, value, force=None) | Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None | 5.429119 | 6.132092 | 0.885362 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word | def read_word_data(self, i2c_addr, register, force=None) | Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int | 5.610752 | 6.671994 | 0.840941 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word | def process_call(self, i2c_addr, register, value, force=None) | Executes an SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int | 5.174578 | 5.214519 | 0.99234 |
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1] | def read_block_data(self, i2c_addr, register, force=None) | Read a block of up to 32 bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list | 4.666921 | 5.31718 | 0.877706 |
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg) | def write_block_data(self, i2c_addr, register, data, force=None) | Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None | 3.286627 | 3.411308 | 0.963451 |
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1] | def read_i2c_block_data(self, i2c_addr, register, length, force=None) | Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list | 4.298564 | 4.432775 | 0.969723 |
ioctl_data = i2c_rdwr_ioctl_data.create(*i2c_msgs)
ioctl(self.fd, I2C_RDWR, ioctl_data) | def i2c_rdwr(self, *i2c_msgs) | Combine a series of i2c read and write operations in a single
transaction (with repeated start bits but no stop bits in between).
This method takes i2c_msg instances as input, which must be created
first with :py:meth:`i2c_msg.read` or :py:meth:`i2c_msg.write`.
:param i2c_msgs: One or more i2c_msg class instances.
:type i2c_msgs: i2c_msg
:rtype: None | 3.908613 | 4.150551 | 0.941709 |
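A usage sketch of the read/write/i2c_rdwr API above, assuming the smbus2-style layout (`from smbus2 import SMBus, i2c_msg`); the bus number, device address 0x50 and register 0x00 are placeholders:

```python
# Hedged sketch, assuming the smbus2-style API shown above.
from smbus2 import SMBus, i2c_msg

bus = SMBus(1)                        # opens /dev/i2c-1
write = i2c_msg.write(0x50, [0x00])   # set the register pointer (placeholder address/register)
read = i2c_msg.read(0x50, 16)         # then read 16 bytes back
bus.i2c_rdwr(write, read)             # one transaction, repeated start between the two messages
# the received bytes are now held in the `read` message buffer (e.g. list(read) with smbus2)
bus.close()
```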
cells = list(cellmap.values())
rows = 0
cols = 0
for cell in cells:
if sheet is None or cell.sheet == sheet:
rows = max(rows, int(cell.row))
cols = max(cols, int(col2num(cell.col)))
return (rows, cols) | def max_dimension(cellmap, sheet = None) | This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tuple
of two integers, the first being the rows and the second being the columns.
:param cellmap: all the cells that should be used to calculate the maximum.
:param sheet: (optionally) a string with the sheet name.
:return: a tuple of two integers, the first being the rows and the second being the columns.
if isinstance(array, (float, int)):
rows = 1 # special case for A1:A1 type ranges which for some reason only return an int/float
elif array is None:
rows = 1 # some A1:A1 ranges return None (issue with ref cell)
else:
rows = len(array.values)
return rows | def rows(array) | Function to find the number of rows in an array.
Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597
:param array: the array of which the rows should be counted.
:return: the number of rows. | 9.43884 | 8.898951 | 1.060669 |
if isinstance(values, Range):
values = values.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.irr() is %s and not 0' % guess)
else:
try:
return np.irr(values)
except Exception as e:
return ExcelError('#NUM!', e) | def irr(values, guess = None) | Function to calculate the internal rate of return (IRR) using payments and periodic dates. It resembles the
excel function IRR().
Excel reference: https://support.office.com/en-us/article/IRR-function-64925eaa-9988-495b-b290-3ad0c163c1bc
:param values: the payments of which at least one has to be negative.
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR. | 6.062767 | 6.546112 | 0.926163 |
if isinstance(values, Range):
values = values.values
if isinstance(dates, Range):
dates = dates.values
if guess is not None and guess != 0:
raise ValueError('guess value for excellib.xirr() is %s and not 0' % guess)
else:
try:
return scipy.optimize.newton(lambda r: xnpv(r, values, dates, lim_rate=False), 0.0)
except RuntimeError: # Failed to converge?
return scipy.optimize.brentq(lambda r: xnpv(r, values, dates, lim_rate=False), -1.0, 1e10) | def xirr(values, dates, guess=0) | Function to calculate the internal rate of return (IRR) using payments and non-periodic dates. It resembles the
excel function XIRR().
Excel reference: https://support.office.com/en-ie/article/xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:param guess: an initial guess which is required by Excel but isn't used by this function.
:return: a float being the IRR. | 3.897136 | 3.736866 | 1.042889 |
values = values.values
if isinstance(dates, Range):
dates = dates.values
if len(values) != len(dates):
return ExcelError('#NUM!', '`values` range must be the same length as `dates` range in XNPV, %s != %s' % (len(values), len(dates)))
if lim_rate and rate < 0:
return ExcelError('#NUM!', 'Excel cannot handle a negative `rate`')
xnpv = 0
for v, d in zip(values, dates):
xnpv += v / np.power(1.0 + rate, (d - dates[0]) / 365)
return xnpv | def xnpv(rate, values, dates, lim_rate = True): # Excel reference: https://support.office.com/en-us/article/XNPV-function-1b42bbf6-370f-4532-a0eb-d67c16b664b7
if isinstance(values, Range) | Function to calculate the net present value (NPV) using payments and non-periodic dates. It resembles the excel function XNPV().
:param rate: the discount rate.
:param values: the payments of which at least one has to be negative.
:param dates: the dates as excel dates (e.g. 43571 for 16/04/2019).
:return: a float being the NPV. | 3.346855 | 3.849259 | 0.86948 |
reference_date = datetime.datetime.today().date()
days_since_epoch = reference_date - EXCEL_EPOCH
# why +2 ?
# 1 based from 1900-01-01
# I think it is "inclusive" / to the _end_ of the day.
# https://support.office.com/en-us/article/date-function-e36c0c8c-4104-49da-ab83-82328b832349
return days_since_epoch.days + 2 | def today() | Note: Excel stores dates as sequential serial numbers so that they can be used in calculations.
January 1, 1900 is serial number 1, and January 1, 2008 is serial number 39448 because it is 39,447 days after January 1, 1900.
You will need to change the number format (Format Cells) in order to display a proper date. | 8.462631 | 8.446766 | 1.001878 |
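A quick worked check of the serial-number arithmetic described above; `EXCEL_EPOCH` is an assumption here (1900-01-01), chosen to match the `+2` offset in the function:

```python
import datetime

EXCEL_EPOCH = datetime.date(1900, 1, 1)  # assumed epoch, matching the offset logic above

d = datetime.date(2008, 1, 1)
serial = (d - EXCEL_EPOCH).days + 2  # +2: serials are 1-based and Excel counts a fictitious 1900-02-29
print(serial)  # 39448, matching the docstring's example
```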
sio = BytesIO(data)
sio.seek(pos + 22) # size of 'ZIP end of central directory record'
sio.truncate()
sio.seek(0)
return sio
f.seek(0)
return f | def repair_central_directory(zipFile, is_file_instance): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/reader/excel.py?at=default&fileviewer=file-view-default
''' trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen
'''
f = zipFile if is_file_instance else open(zipFile, 'rb+')
data = f.read()
pos = data.find(CENTRAL_DIRECTORY_SIGNATURE) # End of central directory signature
if (pos > 0) | trims trailing data from the central directory
code taken from http://stackoverflow.com/a/7457686/570216, courtesy of Uri Cohen | 5.851758 | 4.994287 | 1.17169 |
return float(value)
return int(value) | def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None | Convert numbers as string to an int or float | 13.411893 | 4.563338 | 2.939053 |
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | def read_rels(archive) | Read relationships for a workbook | 3.739829 | 3.459215 | 1.081121 |
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName') | def read_content_types(archive) | Read content types. | 5.308389 | 4.960901 | 1.070045 |
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib | def read_sheets(archive) | Read worksheet titles and ids for a workbook | 4.430213 | 4.02737 | 1.100026 |
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel | def detect_worksheets(archive) | Return a list of worksheets | 5.9243 | 5.896298 | 1.004749 |
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings) | def read_string_table(xml_source) | Read in all shared strings in the table | 6.13244 | 5.376578 | 1.140584 |
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
# could be a zipfile
pass
return xml_source | def _get_xml_iter(xml_source) | Possible inputs: strings, bytes, members of zipfile, temporary file
Always returns a file-like object | 2.77604 | 2.476501 | 1.120953 |
if t.ttype == "operand":
if t.tsubtype in ["range", "named_range", "pointer"] :
# print 'Creating Node', t.tvalue, t.tsubtype
return RangeNode(t, ref, debug = debug)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t, ref, debug = debug)
elif t.ttype.startswith("operator"):
return OperatorNode(t, ref, debug = debug)
else:
return ASTNode(t, debug = debug) | def create_node(t, ref = None, debug = False) | Simple factory function | 3.404227 | 3.427168 | 0.993306 |
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() | def build_ast(expression, debug = False) | build an AST from an Excel formula expression in reverse polish notation | 3.695835 | 3.612296 | 1.023126 |
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast | def cell2code(cell, named_ranges) | Generate python code for the given cell | 5.454636 | 5.566112 | 0.979972 |
if self._changes:
return self._changes.pop()
action_file = None
if self._changed:
self._changed = False
action_file = self._action_file or True # TODO: Hack (see above)
return action_file, None | def examine(self) | Called by LiveReloadHandler's poll_tasks method.
If a boolean true value is returned, then the waiters (browsers) are
reloaded. | 10.380184 | 9.840245 | 1.05487 |
if action is None:
action = _set_changed
event_handler = _WatchdogHandler(self, action)
self._observer.schedule(event_handler, path=path, recursive=True) | def watch(self, path, action, *args, **kwargs) | Called by the Server instance when a new watch task is requested. | 5.380968 | 5.965077 | 0.902079 |
fmt_re = re.compile(r'([^<]+) <([^>]+)>')
authors = local('git shortlog -s -e -n | cut -f 2-', capture=True)
with open('AUTHORS', 'w') as fh:
fh.write('Project contributors\n')
fh.write('====================\n\n')
for line in authors.splitlines():
match = fmt_re.match(line)
name, email = match.groups()
if email in env.ignored_authors:
continue
fh.write(' * ')
fh.write(line)
fh.write('\n') | def authors() | Updates the AUTHORS file with a list of committers from GIT. | 3.643766 | 3.250793 | 1.120885 |
if not is_working_tree_clean():
print('Your working tree is not clean. Refusing to create a release.')
return
print('Rebuilding the AUTHORS file to check for modifications...')
authors()
if not is_working_tree_clean():
print('Your working tree is not clean after the AUTHORS file was '
'rebuilt.')
print('Please commit the changes before continuing.')
return
if not is_manifest_up_to_date():
print('Manifest is not up to date.')
print('Please update MANIFEST.in or remove spurious files.')
return
# Get version
version = 'v{}'.format(local('python setup.py --version', capture=True))
name = local('python setup.py --name', capture=True)
# Tag
tag_message = '{} release version {}.'.format(name, version)
print('----------------------')
print('Proceeding will tag the release, push the repository upstream,')
print('and release a new version on PyPI.')
print()
print('Version: {}'.format(version))
print('Tag message: {}'.format(tag_message))
print()
if not confirm('Continue?', default=True):
print('Aborting.')
return
local('git tag -a {} -m {}'.format(pipes.quote(version),
pipes.quote(tag_message)))
# Push
local('git push --tags origin develop')
# Package and upload to pypi
local('python setup.py sdist bdist_wheel upload') | def release() | Create a new release and upload it to PyPI. | 3.93269 | 3.825412 | 1.028044 |
if self.is_terminal:
if index == len(iterable) or \
(gather and index < len(iterable) and iterable[index] == ' '): # only gather on word break)
confidence = float(len(self.key) - edit_distance) / float(max(len(self.key), index))
if confidence > match_threshold:
yield {
'key': self.key,
'match': iterable[:index],
'data': self.data,
'confidence': confidence * self.weight
}
if index < len(iterable) and iterable[index] in self.children:
for result in self.children[iterable[index]]\
.lookup(iterable, index + 1, gather=gather,
edit_distance=edit_distance, max_edit_distance=max_edit_distance, matched_length=matched_length + 1):
yield result
# if there's edit distance remaining and it's possible to match a word above the confidence threshold
potential_confidence = float(index - edit_distance + (max_edit_distance - edit_distance)) / \
(float(index) + (max_edit_distance - edit_distance)) if index + max_edit_distance - edit_distance > 0 else 0.0
if edit_distance < max_edit_distance and potential_confidence > match_threshold:
for child in list(self.children):
if index >= len(iterable) or child != iterable[index]:
# substitution
for result in self.children[child]\
.lookup(iterable, index + 1, gather=gather,
edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):
yield result
# delete
for result in self.children[child]\
.lookup(iterable, index + 2, gather=gather,
edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):
yield result
# insert
for result in self.children[child]\
.lookup(iterable, index, gather=gather,
edit_distance=edit_distance + 1, max_edit_distance=max_edit_distance, matched_length=matched_length):
yield result | def lookup(self, iterable, index=0, gather=False, edit_distance=0, max_edit_distance=0, match_threshold=0.0, matched_length=0) | TODO: Implement trie lookup with edit distance
Args:
iterable(list?): key used to find what is requested; this could
be a generator.
index(int): index of what is requested
gather(bool): whether to gather or not
edit_distance(int): the distance -- currently not used
max_edit_distance(int): the max distance -- not currently used
yields:
object: yields the results of the search | 2.245424 | 2.31127 | 0.971511 |
if index == len(iterable):
self.is_terminal = True
self.key = iterable
self.weight = weight
if data:
self.data.add(data)
else:
if iterable[index] not in self.children:
self.children[iterable[index]] = TrieNode()
self.children[iterable[index]].insert(iterable, index + 1, data, weight)
Args:
iterable(hashable): key used to find in the future.
data(object): data associated with the key
index(int): an index used for insertion.
weight(float): the weight given to the item added. | 2.860552 | 2.803782 | 1.020247 |
if index == len(iterable):
if self.is_terminal:
if data:
self.data.remove(data)
if len(self.data) == 0:
self.is_terminal = False
else:
self.data.clear()
self.is_terminal = False
return True
else:
return False
elif iterable[index] in self.children:
return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
else:
return False | def remove(self, iterable, data=None, index=0) | Remove an element from the trie
Args:
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to be removed
Returns:
bool:
True: if it was removed
False: if it was not removed | 2.322487 | 2.268574 | 1.023765 |
for result in self.lookup(iterable, gather=True):
yield result | def gather(self, iterable) | Calls lookup with gather set to True, passing the iterable, and yields
the results. | 13.173537 | 3.79892 | 3.467706 |
for result in self.root.lookup(iterable,
gather=gather,
edit_distance=0,
max_edit_distance=self.max_edit_distance,
match_threshold=self.match_threshold):
yield result | def lookup(self, iterable, gather=False) | Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrieve nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init | 4.35936 | 3.406296 | 1.279795 |
self.root.insert(iterable, index=0, data=data, weight=weight) | def insert(self, iterable, data=None, weight=1.0) | Used to insert into the root node
Args
iterable(hashable): index or key used to identify
data(object): data to be paired with the key | 5.539924 | 5.327207 | 1.03993 |
return self.root.remove(iterable, data=data) | def remove(self, iterable, data=None) | Used to remove from the root node
Args:
iterable(hashable): index or key used to identify
item to remove
data: data to be paired with the key | 8.40935 | 10.643232 | 0.790112 |
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)] # p intersects N(vertex)
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)] # x intersects N(vertex)
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
x.append(vertex) | def bronk(r, p, x, graph) | This is used to find cliques and remove them from the graph
Args:
graph (graph): this is the graph of vertices to search for
cliques
p (list): this is a list of the vertices to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and vertices | 2.618146 | 2.760106 | 0.948567 |
start_token = tag.get('start_token')
entity = tag.get('entities', [])[entity_index]
return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence')) | def graph_key_from_tag(tag, entity_index) | Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity. | 4.354475 | 4.392239 | 0.991402 |
neighbors_of_a = self.adjacency_lists.get(a)
if not neighbors_of_a:
neighbors_of_a = set()
self.adjacency_lists[a] = neighbors_of_a
neighbors_of_a.add(b)
neighbors_of_b = self.adjacency_lists.get(b)
if not neighbors_of_b:
neighbors_of_b = set()
self.adjacency_lists[b] = neighbors_of_b
neighbors_of_b.add(a) | def add_edge(self, a, b) | Used to add edges to the graph. 'a' and 'b' are vertices and
if 'a' or 'b' doesn't exist then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertex of the edge | 1.463075 | 1.610925 | 0.90822 |
if isinstance(data, list) and len(data) > 0:
self.nodes.append(data)
else:
self.nodes.append([data]) | def append(self, data) | Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice | 3.109026 | 3.535071 | 0.879481 |
if index < len(self.nodes):
for entity in self.nodes[index]:
for next_result in self.traverse(index=index+1):
if isinstance(entity, list):
yield entity + next_result
else:
yield [entity] + next_result
else:
yield [] | def traverse(self, index=0) | This is used to produce a list of lists where each item
in that list is a different combination of items from the lists
within, with every combination of such values.
Args:
index (int) : the index at which to start the list.
Note this is only used internally during recursion.
Returns:
list : every combination. | 3.08635 | 3.522023 | 0.8763 |
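To make the combination behaviour concrete, a small sketch assuming the Lattice class above is importable from this codebase; the literal values are illustrative:

```python
# Hedged sketch of Lattice.append / Lattice.traverse as defined above.
lattice = Lattice()
lattice.append([1, 2])   # two alternatives for the first slot
lattice.append(3)        # a single item gets wrapped in a list
lattice.append([4, 5])   # two alternatives for the last slot

print(list(lattice.traverse()))
# [[1, 3, 4], [1, 3, 5], [2, 3, 4], [2, 3, 5]]  -> 2 * 1 * 2 combinations
```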
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name)
return graph | def _build_graph(self, tags) | Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities. | 2.771345 | 2.823544 | 0.981513 |
entities = {}
graph = self._build_graph(tags)
# name entities
for tag in tags:
for entity_index in xrange(len(tag.get('entities'))):
node_name = graph_key_from_tag(tag, entity_index)
if not node_name in entities:
entities[node_name] = []
entities[node_name] += [
tag.get('entities', [])[entity_index],
tag.get('entities', [])[entity_index].get('confidence'),
tag
]
for clique in get_cliques(list(entities), graph):
result = []
for entity_name in clique:
start_token = int(entity_name.split("-")[0])
old_tag = entities[entity_name][2]
tag = {
'start_token': start_token,
'entities': [entities.get(entity_name)[0]],
'confidence': entities.get(entity_name)[1] * old_tag.get('confidence', 1.0),
'end_token': old_tag.get('end_token'),
'match': old_tag.get('entities')[0].get('match'),
'key': old_tag.get('entities')[0].get('key'),
'from_context': old_tag.get('from_context', False)
}
result.append(tag)
result = sorted(result, key=lambda e: e.get('start_token'))
yield result | def _sub_expand(self, tags) | This is called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
list : a list of tags sorted by start_token forming a clique | 3.066272 | 2.954199 | 1.037937 |
lattice = Lattice()
overlapping_spans = []
def end_token_index():
return max([t.get('end_token') for t in overlapping_spans])
for i in xrange(len(tags)):
tag = tags[i]
if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
overlapping_spans.append(tag)
elif len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
overlapping_spans = [tag]
else:
lattice.append(overlapping_spans)
overlapping_spans = [tag]
if len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
else:
lattice.append(overlapping_spans)
return lattice.traverse() | def expand(self, tags, clique_scoring_func=None) | This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
list : a list of cliques | 2.240865 | 2.298437 | 0.974952 |
for start_idx in xrange(len(tokens)):
for end_idx in xrange(start_idx + 1, len(tokens) + 1):
yield ' '.join(tokens[start_idx:end_idx]), start_idx | def _iterate_subsequences(self, tokens) | Using regexes invokes this function, which significantly impacts the performance of adapt. It is an N! operation.
Args:
tokens(list): list of tokens used to yield results.
Yields:
str: ? | 2.20794 | 2.680198 | 0.823797 |
tokens = self.tokenizer.tokenize(utterance)
entities = []
if len(self.regex_entities) > 0:
for part, idx in self._iterate_subsequences(tokens):
local_trie = Trie()
for regex_entity in self.regex_entities:
match = regex_entity.match(part)
groups = match.groupdict() if match else {}
for key in list(groups):
match_str = groups.get(key)
local_trie.insert(match_str, (match_str, key))
sub_tagger = EntityTagger(local_trie, self.tokenizer, max_tokens=self.max_tokens)
for sub_entity in sub_tagger.tag(part):
sub_entity['start_token'] += idx
sub_entity['end_token'] += idx
for e in sub_entity['entities']:
e['confidence'] = 0.5
entities.append(sub_entity)
additional_sort = len(entities) > 0
context_entities = []
for i in xrange(len(tokens)):
part = ' '.join(tokens[i:])
for new_entity in self.trie.gather(part):
new_entity['data'] = list(new_entity['data'])
entities.append({
'match': new_entity.get('match'),
'key': new_entity.get('key'),
'start_token': i,
'entities': [new_entity],
'end_token': i + len(self.tokenizer.tokenize(new_entity.get('match'))) - 1,
'from_context': False
})
if context_trie:
for new_entity in context_trie.gather(part):
new_entity['data'] = list(new_entity['data'])
new_entity['confidence'] *= 2.0 # context entities get double the weight!
context_entities.append({
'match': new_entity.get('match'),
'key': new_entity.get('key'),
'start_token': i,
'entities': [new_entity],
'end_token': i + len(self.tokenizer.tokenize(new_entity.get('match'))) - 1,
'from_context': True
})
additional_sort = additional_sort or len(entities) > 0
if additional_sort:
entities = self._sort_and_merge_tags(entities + context_entities)
return entities | def tag(self, utterance, context_trie=None) | Tag known entities within the utterance.
Args:
utterance(str): a string of natural language text
context_trie(trie): optional, a trie containing only entities from context
for this request
Returns: dictionary, with the following keys
match(str): the proper entity matched
key(str): the string that was matched to the entity
start_token(int): 0-based index of the first token matched
end_token(int): 0-based index of the last token matched
entities(list): a list of entity kinds as strings (Ex: Artist, Location) | 2.538715 | 2.480811 | 1.023341 |
best_intent = None
best_tags = None
context_as_entities = [{'entities': [c]} for c in context]
for intent in self.intent_parsers:
i, tags = intent.validate_with_tags(parse_result.get('tags') + context_as_entities, parse_result.get('confidence'))
if not best_intent or (i and i.get('confidence') > best_intent.get('confidence')):
best_intent = i
best_tags = tags
return best_intent, best_tags | def __best_intent(self, parse_result, context=[]) | Decide the best intent
Args:
parse_result(list): results used to match the best intent.
context(list): ?
Returns:
best_intent, best_tags:
best_intent : The best intent for given results
best_tags : The Tags for result | 3.437992 | 3.699106 | 0.929411 |
tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])
result_context = [c for c in context if c['key'] not in tags_keys]
return result_context | def __get_unused_context(self, parse_result, context) | Used to get unused context from context, i.e. any keys not in
parse_result.
Args:
parse_results(list): parsed results used to identify what keys
in the context are used.
context(list): this is the context used to match with parsed results;
keys missing in the parsed results are the unused context
Returns:
list: A list of the unused context results. | 4.387712 | 5.329299 | 0.823319 |
parser = Parser(self.tokenizer, self.tagger)
parser.on('tagged_entities',
(lambda result:
self.emit("tagged_entities", result)))
context = []
if context_manager:
context = context_manager.get_context()
for result in parser.parse(utterance, N=num_results, context=context):
self.emit("parse_result", result)
# create a context without entities used in result
remaining_context = self.__get_unused_context(result, context)
best_intent, tags = self.__best_intent(result, remaining_context)
if best_intent and best_intent.get('confidence', 0.0) > 0:
if include_tags:
best_intent['__tags__'] = tags
yield best_intent | def determine_intent(self, utterance, num_results=1, include_tags=False, context_manager=None) | Given an utterance, provide a valid intent.
Args:
utterance(str): an ascii or unicode string representing natural language speech
include_tags(list): includes the parsed tags (including position and confidence)
as part of result
context_manager(list): a context manager to provide context to the utterance
num_results(int): a maximum number of results to be returned.
Returns: A generator that yields dictionaries. | 4.194077 | 4.499226 | 0.932178 |
if alias_of:
self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))
else:
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept')) | def register_entity(self, entity_value, entity_type, alias_of=None) | Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show") | 2.936268 | 3.020257 | 0.972191 |
if regex_str and regex_str not in self._regex_strings:
self._regex_strings.add(regex_str)
self.regular_expressions_entities.append(re.compile(regex_str, re.IGNORECASE)) | def register_regex_entity(self, regex_str) | A regular expression making use of python named group expressions.
Example: (?P<Artist>.*)
regex_str(str): a string representing a regular expression as defined above | 3.103546 | 3.413008 | 0.909329 |
if hasattr(intent_parser, 'validate') and callable(intent_parser.validate):
self.intent_parsers.append(intent_parser)
else:
raise ValueError("%s is not an intent parser" % str(intent_parser)) | def register_intent_parser(self, intent_parser) | "Enforce" the intent parser interface at registration time.
Args:
intent_parser(intent): Intent to be registered.
Raises:
ValueError: on invalid intent | 2.613722 | 2.960719 | 0.8828 |
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].tokenizer | def tokenizer(self) | A property to link into IntentEngine's tokenizer.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Return: the domains tokenizer from its IntentEngine | 6.922761 | 6.418225 | 1.07861 |
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].trie | def trie(self) | A property to link into IntentEngine's trie.
warning:: this is only for backwards compatibility and should not be used if you
intend on using domains.
Return: the domains trie from its IntentEngine | 7.064995 | 6.226223 | 1.134716 |
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].tagger | def tagger(self) | A property to link into IntentEngine's intent_parsers.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Return: the domains intent_parsers from its IntentEngine | 6.518908 | 6.391646 | 1.019911 |
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].intent_parsers | def intent_parsers(self) | A property to link into IntentEngine's intent_parsers.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Returns: the domains intent_parsers from its IntentEngine | 6.297359 | 5.634366 | 1.11767 |
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain]._regex_strings | def _regex_strings(self) | A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine | 7.907797 | 5.970089 | 1.324569 |
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].regular_expressions_entities | def regular_expressions_entities(self) | A property to link into IntentEngine's regular_expressions_entities.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Returns: the domains regular_expression_entities from its IntentEngine | 5.872503 | 5.025619 | 1.168514 |
self.domains[domain] = IntentDeterminationEngine(
tokenizer=tokenizer, trie=trie) | def register_domain(self, domain=0, tokenizer=None, trie=None) | Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add | 9.347504 | 7.738903 | 1.207859 |
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of) | def register_entity(self, entity_value, entity_type, alias_of=None, domain=0) | Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to | 2.33278 | 2.618221 | 0.890979 |
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_regex_entity(regex_str=regex_str) | def register_regex_entity(self, regex_str, domain=0) | A regular expression making use of python named group expressions.
Example: (?P<Artist>.*)
Args:
regex_str(str): a string representing a regular expression as defined above
domain(str): a string representing the domain you wish to add the entity to | 3.008236 | 3.24566 | 0.926849 |
intents = []
for domain in self.domains:
gen = self.domains[domain].determine_intent(utterance=utterance,
num_results=1)
for intent in gen:
intents.append(intent)
intents = heapq.nlargest(
num_results, intents, key=lambda intent: intent['confidence'])
for intent in intents:
yield intent | def determine_intent(self, utterance, num_results=1) | Given an utterance, provide a valid intent.
utterance(str): an ascii or unicode string representing natural language speech
num_results(int): a maximum number of results to be returned.
Returns: A generator that yields dictionaries. | 3.757137 | 3.806791 | 0.986956 |
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_intent_parser(
intent_parser=intent_parser) | def register_intent_parser(self, intent_parser, domain=0) | Register a intent parser with a domain.
Args:
intent_parser(intent): The intent parser you wish to register.
domain(str): a string representing the domain you wish to register the intent
parser to. | 3.246459 | 3.249063 | 0.999199 |
s = string
s = re.sub('\t', " ", s)
s = re.sub("(" + regex_separator + ")", " \g<1> ", s)
s = re.sub("([^0-9]),", "\g<1> , ", s)
s = re.sub(",([^0-9])", " , \g<1>", s)
s = re.sub("^(')", "\g<1> ", s)
s = re.sub("(" + regex_not_letter_number + ")'", "\g<1> '", s)
s = re.sub("(" + regex_clitics + ")$", " \g<1>", s)
s = re.sub("(" + regex_clitics + ")(" + regex_not_letter_number + ")", " \g<1> \g<2>", s)
words = s.strip().split()
p1 = re.compile(".*" + regex_letter_number + "\\.")
p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
token_list = []
for word in words:
m1 = p1.match(word)
m2 = p2.match(word)
if m1 and word not in abbreviations_list and not m2:
token_list.append(word[0: word.find('.')])
token_list.append(word[word.find('.')])
else:
token_list.append(word)
return token_list | def tokenize(self, string) | Used to parse a string into tokens
This function takes in a string and returns a list of tokens
Args:
string(str): This is a string of words or a sentence to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly; for example, "don't"
would parse as two tokens 'do' and "n't", which is probably not
what we want. Maybe it should be "don't", or maybe contractions
should be expanded into "do not" or "do", "not". This could be
done with a contraction dictionary and some preprocessing. | 2.744619 | 2.845888 | 0.964416 |
start = time.time()
context_trie = None
if context and isinstance(context, list):
# sort by confidence in ascending order, so
# highest confidence for an entity is last.
# see comment on TrieNode ctor
context.sort(key=lambda x: x.get('confidence'))
context_trie = Trie()
for entity in context:
entity_value, entity_type = entity.get('data')[0]
context_trie.insert(entity_value.lower(),
data=(entity_value, entity_type),
weight=entity.get('confidence'))
tagged = self._tagger.tag(utterance.lower(), context_trie=context_trie)
self.emit("tagged_entities",
{
'utterance': utterance,
'tags': list(tagged),
'time': time.time() - start
})
start = time.time()
bke = BronKerboschExpander(self._tokenizer)
def score_clique(clique):
score = 0.0
for tagged_entity in clique:
ec = tagged_entity.get('entities', [{'confidence': 0.0}])[0].get('confidence')
score += ec * len(tagged_entity.get('entities', [{'match': ''}])[0].get('match')) / (
len(utterance) + 1)
return score
parse_results = bke.expand(tagged, clique_scoring_func=score_clique)
count = 0
for result in parse_results:
count += 1
parse_confidence = 0.0
for tag in result:
sample_entity = tag['entities'][0]
entity_confidence = sample_entity.get('confidence', 0.0) * float(
len(sample_entity.get('match'))) / len(utterance)
parse_confidence += entity_confidence
yield {
'utterance': utterance,
'tags': result,
'time': time.time() - start,
'confidence': parse_confidence
}
if count >= N:
break | def parse(self, utterance, context=None, N=1) | Used to find tags within utterance with a given confidence
Args:
utterance(str): conversational piece given by the user
context(list): a list of entities
N(int): number of results
Returns: yield an object with the following fields
utterance(str): the value passed in
tags(list) : a list of tags found in utterance
time(time) : duration since call of function
confidence(float) : float indicating how confident of a match to the
utterance. This might be used to determine the most likely intent. | 3.47548 | 3.354514 | 1.036061 |
result = len(query.keys()) > 0
for key in query.keys():
result = result and query[key] == self.metadata.get(key)
return result | def metadata_matches(self, query={}) | Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning True if every key is in metadata. A query without keys
returns False.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata | 3.733379 | 4.098514 | 0.910911 |
self.entities.append(tag)
for k in metadata.keys():
if k not in self.metadata:
self.metadata[k] = k | def merge_context(self, tag, metadata) | Merge a new entity and metadata into the ContextManagerFrame.
Appends tag as new entity and adds keys in metadata to keys in
self.metadata.
Args:
tag(str): entity to be added to self.entities
metadata(object): metadata contains keys to be added to self.metadata | 4.423356 | 3.177904 | 1.39191 |
top_frame = self.frame_stack[0] if len(self.frame_stack) > 0 else None
if top_frame and top_frame.metadata_matches(metadata):
top_frame.merge_context(entity, metadata)
else:
frame = ContextManagerFrame(entities=[entity], metadata=metadata.copy())
self.frame_stack.insert(0, frame) | def inject_context(self, entity, metadata={}) | Args:
entity(object):
format {'data': 'Entity tag as <str>',
'key': 'entity proper name as <str>',
'confidence': <float>'
}
metadata(object): dict, arbitrary metadata about the entity being added | 3.279224 | 3.40365 | 0.963443 |
if not max_frames or max_frames > len(self.frame_stack):
max_frames = len(self.frame_stack)
missing_entities = list(missing_entities)
context = []
for i in xrange(max_frames):
frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]
for entity in frame_entities:
entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i)
context += frame_entities
result = []
if len(missing_entities) > 0:
for entity in context:
if entity.get('data') in missing_entities:
result.append(entity)
# NOTE: this implies that we will only ever get one
# of an entity kind from context, unless specified
# multiple times in missing_entities. Cannot get
# an arbitrary number of an entity kind.
missing_entities.remove(entity.get('data'))
else:
result = context
return result | def get_context(self, max_frames=None, missing_entities=[]) | Constructs a list of entities from the context.
Args:
max_frames(int): maximum number of frames to look back
missing_entities(list of str): a list or set of tag names, as strings
Returns:
list: a list of entities | 3.493015 | 3.538323 | 0.987195 |
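To show how the two context methods above fit together, a minimal sketch; the import path `adapt.context`, the zero-argument constructor, and the entity values are assumptions based on the adapt-parser project this code appears to come from:

```python
# Hedged sketch of the ContextManager shown above.
from adapt.context import ContextManager

cm = ContextManager()
cm.inject_context({'data': 'Location', 'key': 'seattle', 'confidence': 1.0},
                  metadata={'domain': 'weather'})

print(cm.get_context())                                # all frames, confidence decayed by frame age
print(cm.get_context(missing_entities=['Location']))   # only entities whose data matches
```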
for tag in tags:
for entity in tag.get('entities'):
for v, t in entity.get('data'):
if t.lower() == entity_type.lower() and tag.get('start_token', 0) > after_index:
return tag, v, entity.get('confidence')
return None, None, None | def find_first_tag(tags, entity_type, after_index=-1) | Searches tags for entity type after given index
Args:
tags(list): a list of tags with entity types to be compared to entity_type
entity_type(str): This is the entity type to look for in tags
after_index(int): the start token must be greater than this.
Returns:
( tag, v, confidence ):
tag(str): is the tag that matched
v(str): ? the word that matched?
confidence(float): is a measure of accuracy. 1 is full confidence and 0 is none. | 3.852424 | 3.300703 | 1.167153 |
if len(lists) == 0:
yield []
else:
for el in lists[0]:
for next_list in choose_1_from_each(lists[1:]):
yield [el] + next_list | def choose_1_from_each(lists) | Takes a list of lists and returns a list of lists with one item
from each list. This new list should be the length of each list multiplied
by the others: 18 for a list with lists of 3, 2 and 3. Also the length
of each sub list should be the same as the length of lists passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of list constructions with one item from each
list in lists. | 2.331907 | 3.009236 | 0.774917 |
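A short sketch of the 3 * 2 * 3 = 18 combinations mentioned in the docstring, assuming the generator above is importable:

```python
# Hedged sketch of choose_1_from_each, matching the docstring's 3 x 2 x 3 example.
lists = [[1, 2, 3], ['a', 'b'], ['x', 'y', 'z']]
combos = list(choose_1_from_each(lists))

print(len(combos))  # 18, i.e. 3 * 2 * 3
print(combos[0])    # [1, 'a', 'x'], one item taken from each input list
```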
if len(tags) < len(at_least_one):
return None
for possible_resolution in choose_1_from_each(at_least_one):
resolution = {}
pr = possible_resolution[:]
for entity_type in pr:
last_end_index = -1
if entity_type in resolution:
last_end_index = resolution[entity_type][-1].get('end_token')
tag, value, c = find_first_tag(tags, entity_type, after_index=last_end_index)
if not tag:
break
else:
if entity_type not in resolution:
resolution[entity_type] = []
resolution[entity_type].append(tag)
if len(resolution) == len(possible_resolution):
return resolution
return None | def resolve_one_of(tags, at_least_one) | This searches tags for Entities in at_least_one and returns any match
Args:
tags(list): List of tags with Entities to search for Entities
at_least_one(list): List of Entities to find in tags
Returns:
object: returns None if no match is found but returns any match as an object | 3.767255 | 3.915563 | 0.962124 |
intent, tags = self.validate_with_tags(tags, confidence)
return intent | def validate(self, tags, confidence) | Using this method removes tags from the result of validate_with_tags
Returns:
intent(intent): Results from validate_with_tags | 9.536583 | 7.887312 | 1.209104 |
result = {'intent_type': self.name}
intent_confidence = 0.0
local_tags = tags[:]
used_tags = []
for require_type, attribute_name in self.requires:
required_tag, canonical_form, confidence = find_first_tag(local_tags, require_type)
if not required_tag:
result['confidence'] = 0.0
return result, []
result[attribute_name] = canonical_form
if required_tag in local_tags:
local_tags.remove(required_tag)
used_tags.append(required_tag)
# TODO: use confidence based on edit distance and context
intent_confidence += confidence
if len(self.at_least_one) > 0:
best_resolution = resolve_one_of(tags, self.at_least_one)
if not best_resolution:
result['confidence'] = 0.0
return result, []
else:
for key in best_resolution:
result[key] = best_resolution[key][0].get('key') # TODO: at least one must support aliases
intent_confidence += 1.0
used_tags.append(best_resolution)
if best_resolution in local_tags:
local_tags.remove(best_resolution)
for optional_type, attribute_name in self.optional:
optional_tag, canonical_form, conf = find_first_tag(local_tags, optional_type)
if not optional_tag or attribute_name in result:
continue
result[attribute_name] = canonical_form
if optional_tag in local_tags:
local_tags.remove(optional_tag)
used_tags.append(optional_tag)
intent_confidence += 1.0
total_confidence = intent_confidence / len(tags) * confidence
target_client, canonical_form, confidence = find_first_tag(local_tags, CLIENT_ENTITY_NAME)
result['target'] = target_client.get('key') if target_client else None
result['confidence'] = total_confidence
return result, used_tags | def validate_with_tags(self, tags, confidence) | Validate whether tags has the required entities for this intent to fire
Args:
tags(list): Tags and Entities used for validation
confidence(float): confidence of the tagging; the final intent confidence is scaled by this value
Returns:
intent, tags: Returns the intent and the tags used by the intent. On
failure to meet the required entities, returns the intent with a confidence
of 0.0 and an empty list for tags. | 2.905178 | 2.947702 | 0.985574 |
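A worked example of the scoring at the end of validate_with_tags, under the assumption that each matched required or optional entity contributes roughly 1.0 to intent_confidence (the exact per-match value comes from find_first_tag):

intent_confidence = 1.0 + 1.0 + 1.0           # e.g. two required matches plus one optional match
num_tags = 4                                  # len(tags)
tagging_confidence = 0.9                      # the `confidence` argument
total_confidence = intent_confidence / num_tags * tagging_confidence
print(total_confidence)                       # 0.675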
if not attribute_name:
attribute_name = entity_type
self.requires += [(entity_type, attribute_name)]
return self | def require(self, entity_type, attribute_name=None) | The intent parser should require an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications. | 3.49835 | 4.102166 | 0.852806 |
if not attribute_name:
attribute_name = entity_type
self.optional += [(entity_type, attribute_name)]
return self | def optionally(self, entity_type, attribute_name=None) | Parsed intents from this parser can optionally include an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications. | 3.62459 | 4.0535 | 0.894188 |
return Intent(self.name, self.requires, self.at_least_one, self.optional) | def build(self) | Constructs an intent from the builder's specifications.
:return: an Intent instance. | 12.18961 | 8.963562 | 1.359907 |
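The require/optionally/build methods above are meant for chaining. A hedged sketch, assuming the enclosing class is an adapt-style IntentBuilder (the class name, constructor, and the 'Location'/'Time' entity names are illustrative assumptions, not taken from the rows above):

builder = IntentBuilder('WeatherIntent')      # assumed constructor taking the intent name
intent = builder.require('Location').optionally('Time', attribute_name='when').build()
# intent.requires -> [('Location', 'Location')]
# intent.optional -> [('Time', 'when')]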
# note subsets have an unusual encoding
query = .format(s=subset, g=self.graph_name)
bindings = run_sparql(query)
return [r['c']['value'] for r in bindings] | def extract_subset(self, subset) | Find all nodes in a subset.
We assume the oboInOwl encoding of subsets, and subset IDs are IRIs | 13.395891 | 11.137477 | 1.202776 |
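The actual query template in extract_subset is elided in the row above; below is only a hedged sketch of the kind of SPARQL it might build, following the docstring's oboInOwl assumption (the graph IRI and subset IRI are placeholders, not real values):

subset_iri = "http://purl.obolibrary.org/obo/go#goslim_generic"   # placeholder subset IRI
graph_iri = "http://example.org/named-graph"                      # placeholder named graph
query = """
SELECT ?c WHERE {{
  GRAPH <{g}> {{
    ?c <http://www.geneontology.org/formats/oboInOwl#inSubset> <{s}>
  }}
}}
""".format(s=subset_iri, g=graph_iri)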
# note subsets have an unusual encoding
query = .format(g=self.graph_name)
bindings = run_sparql(query)
return [r['s']['value'] for r in bindings] | def subsets(self) | Find all subsets for an ontology | 16.469627 | 13.452962 | 1.224238 |
# TODO: DRY with sparql_ontol_utils
searchterm = searchterm.replace('%','.*')
namedGraph = get_named_graph(self.handle)
query = .format(pred=pred, s=searchterm, g=namedGraph)
bindings = run_sparql(query)
return [r['c']['value'] for r in bindings] | def _search(self, searchterm, pred, **args) | Search for things using labels | 7.482769 | 7.244126 | 1.032943 |
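A small illustration of the wildcard handling in _search: the '%' wildcard in the search term is rewritten to the regex '.*' before being spliced into the (elided) query, which presumably filters labels with a regex:

searchterm = "%kinase%"
regex_term = searchterm.replace('%', '.*')   # -> '.*kinase.*', suitable for a SPARQL regex FILTER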
if inject_prefixes is None:
inject_prefixes = []
namedGraph = get_named_graph(self.handle)
cols = []
select_val = None
if select is None or select=='*':
if not single_column:
cols=None
select_val='*'
else:
if isinstance(select, list):
cols = select
else:
cols = [select]
select_val = ", ".join(['?'+c for c in cols])
prefixes = ""
if inject_prefixes is not None:
plist = ["prefix {}: <{}> ".format(p,expand_uri(p+":")) for p in inject_prefixes if p != "" and p is not None]
prefixes = "\n".join(plist)
query = .format(prefixes=prefixes, s=select_val, b=body, g=namedGraph)
bindings = run_sparql(query)
if len(bindings) == 0:
return []
if cols is None:
return bindings
else:
if single_column:
c = list(bindings[0].keys())[0]
return [r[c]['value'] for r in bindings]
else:
return [r[c]['value'] for c in cols for r in bindings] | def sparql(self, select='*', body=None, inject_prefixes=None, single_column=False) | Execute a SPARQL query.
The query is specified using `select` and `body` parameters.
The argument for the Named Graph is injected into the query.
The select parameter should be either '*' or a list of vars (not prefixed with '?').
- If '*' is passed, then the result is a list of dicts, { $var: {value: $val } }
- If a list of vars is passed, then the result is a list of lists
- Unless single_column=True, in which case the results are a simple list of values from the first var
The inject_prefixes argument can be used to inject a list of prefixes - these are expanded
using the prefixcommons library | 3.187829 | 3.232516 | 0.986176 |
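A hedged usage sketch of the sparql method above ('ont' stands for an instance of this class; the body is a bare graph pattern which the elided template presumably wraps in a SELECT ... WHERE { GRAPH ... } query):

labels = ont.sparql(
    select='lbl',
    body='?c rdfs:label ?lbl',
    inject_prefixes=['rdfs'],      # expanded via prefixcommons, per the docstring
    single_column=True,
)
# -> a flat list of label strings, since single_column=True returns the first (only) variable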
results = search_associations(objects=idlist,
subject_taxon=taxon,
subject_category=subject_category,
select_fields=[M.SUBJECT, M.OBJECT_CLOSURE],
facet_fields=[],
rows=-1,
include_raw=True,
**kwargs)
docs = results['raw'].docs
subjects_per_term = {}
smap = {}
for d in docs:
smap[d[M.SUBJECT]] = 1
for c in d[M.OBJECT_CLOSURE]:
if c in idlist:
if c not in subjects_per_term:
subjects_per_term[c] = []
subjects_per_term[c].append(d[M.SUBJECT])
pop_n = len(smap.keys())
cells = []
for cx in idlist:
csubjs = set(subjects_per_term[cx])
for dx in idlist:
dsubjs = set(subjects_per_term[dx])
a = len(csubjs.intersection(dsubjs))
b = len(csubjs) - a
c = len(dsubjs) - a
d = pop_n - len(dsubjs) - b
ctable = [[a, b], [c, d]]
_, p_under = sp.stats.fisher_exact(ctable, 'less')
_, p_over = sp.stats.fisher_exact(ctable, 'greater')
cells.append({'c':cx, 'd':dx,
'nc':len(csubjs),
'nd':len(dsubjs),
'n':a,
'p_l':p_under,
'p_g':p_over
})
return cells | def term_matrix(idlist, subject_category, taxon, **kwargs) | Intersection between annotated objects
            P1    not(P1)
F1          0     5
not(F1)     6     0 | 3.147474 | 3.242021 | 0.970837 |
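A worked example of the per-pair test inside term_matrix, using the 2x2 table from the docstring above (0 subjects annotated to both terms, 5 to F1 only, 6 to P1 only, 0 to neither):

from scipy import stats

ctable = [[0, 5], [6, 0]]                                  # [[a, b], [c, d]] as built in the loop above
_, p_under = stats.fisher_exact(ctable, alternative='less')
_, p_over = stats.fisher_exact(ctable, alternative='greater')
print(p_under, p_over)                                     # p_l and p_g for this term pair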
all_ancestors = self.ontology.ancestors(go_term, reflexive=True)
subont = self.ontology.subontology(all_ancestors)
return subont.ancestors(go_term, relations) | def get_ancestors_through_subont(self, go_term, relations) | Returns the ancestors from the relation filtered GO subontology of go_term's ancestors.
subontology() is primarily used here for speed when specifying relations to traverse. The point is to first get
a smaller graph (all ancestors of go_term regardless of relation) and then filter relations on that instead of
the whole GO. | 3.374693 | 3.064155 | 1.101345 |
bp_root = "GO:0008150"
if go_term == bp_root:
return True
ancestors = self.get_isa_closure(go_term)
if bp_root in ancestors:
return True
else:
return False | def is_biological_process(self, go_term) | Returns True if go_term has an is_a/part_of ancestor of biological process GO:0008150 | 3.211869 | 2.723389 | 1.179365 |
mf_root = "GO:0003674"
if go_term == mf_root:
return True
ancestors = self.get_isa_closure(go_term)
if mf_root in ancestors:
return True
else:
return False | def is_molecular_function(self, go_term) | Returns True if go_term has an is_a/part_of ancestor of molecular function GO:0003674 | 3.407125 | 2.816953 | 1.209507 |
cc_root = "GO:0005575"
if go_term == cc_root:
return True
ancestors = self.get_isa_closure(go_term)
if cc_root in ancestors:
return True
else:
return False | def is_cellular_component(self, go_term) | Returns True if go_term has an is_a/part_of ancestor of cellular component GO:0005575 | 3.207734 | 2.611197 | 1.228453 |
if not go_term.startswith("GO:"):
return None
else:
# Check ancestors for root terms
if self.is_molecular_function(go_term):
return 'F'
elif self.is_cellular_component(go_term):
return 'C'
elif self.is_biological_process(go_term):
return 'P' | def go_aspect(self, go_term) | For GO terms, returns F, C, or P corresponding to its aspect | 2.907332 | 2.648968 | 1.097534 |
response = self._get_response("graph/neighbors", format="json", **params)
return response.json() | def _neighbors_graph(self, **params) -> Dict | Get neighbors of a node
parameters are directly passed through to SciGraph: e.g. depth, relationshipType | 7.295149 | 6.217294 | 1.173364 |