Dataset columns and value ranges:
index: int64 (0 to 731k)
package: string (length 2 to 98)
name: string (length 1 to 76)
docstring: string (length 0 to 281k)
code: string (length 4 to 1.07M)
signature: string (length 2 to 42.8k)
724,127
pdfrw.objects.pdfstring
PdfString
A PdfString is an encoded string. It has a decode method to get the actual string data out, and there is an encode class method to create such a string. Like any PDF object, it could be indirect, but it defaults to being a direct object.
class PdfString(str): """ A PdfString is an encoded string. It has a decode method to get the actual string data out, and there is an encode class method to create such a string. Like any PDF object, it could be indirect, but it defaults to being a direct object. """ indirect = False # The byte order mark, and unicode that could be # wrongly encoded into the byte order mark by the # pdfdocencoding codec. bytes_bom = codecs.BOM_UTF16_BE bad_pdfdoc_prefix = bytes_bom.decode('latin-1') # Used by decode_literal; filled in on first use unescape_dict = None unescape_func = None @classmethod def init_unescapes(cls): """ Sets up the unescape attributes for decode_literal """ unescape_pattern = r'\\([0-7]{1,3}|\r\n|.)' unescape_func = re.compile(unescape_pattern, re.DOTALL).split cls.unescape_func = unescape_func unescape_dict = dict(((chr(x), chr(x)) for x in range(0x100))) unescape_dict.update(zip('nrtbf', '\n\r\t\b\f')) unescape_dict['\r'] = '' unescape_dict['\n'] = '' unescape_dict['\r\n'] = '' for i in range(0o10): unescape_dict['%01o' % i] = chr(i) for i in range(0o100): unescape_dict['%02o' % i] = chr(i) for i in range(0o400): unescape_dict['%03o' % i] = chr(i) cls.unescape_dict = unescape_dict return unescape_func def decode_literal(self): """ Decode a PDF literal string, which is enclosed in parentheses () Many pdfrw users never decode strings, so defer creating data structures to do so until the first string is decoded. Possible string escapes from the spec: (PDF 1.7 Reference, section 3.2.3, page 53) 1. \[nrtbf\()]: simple escapes 2. \\d{1,3}: octal. Must be zero-padded to 3 digits if followed by digit 3. \<end of line>: line continuation. We don't know the EOL marker used in the PDF, so accept \r, \n, and \r\n. 4. Any other character following \ escape -- the backslash is swallowed. """ result = (self.unescape_func or self.init_unescapes())(self[1:-1]) if len(result) == 1: return convert_store(result[0]) unescape_dict = self.unescape_dict result[1::2] = [unescape_dict[x] for x in result[1::2]] return convert_store(''.join(result)) def decode_hex(self): """ Decode a PDF hexadecimal-encoded string, which is enclosed in angle brackets <>. """ hexstr = convert_store(''.join(self[1:-1].split())) if len(hexstr) % 2: # odd number of chars indicates a truncated 0 hexstr += b'0' if isinstance(hexstr, bytes) else '0' return binascii.unhexlify(hexstr) def to_bytes(self): """ Decode a PDF string to bytes. This is a convenience function for user code, in that (as of pdfrw 0.3) it is never actually used inside pdfrw. """ if self.startswith('(') and self.endswith(')'): return self.decode_literal() elif self.startswith('<') and self.endswith('>'): return self.decode_hex() else: raise ValueError('Invalid PDF string "%s"' % repr(self)) def to_unicode(self): """ Decode a PDF string to a unicode string. This is a convenience function for user code, in that (as of pdfrw 0.3) it is never actually used inside pdfrw. There are two Unicode storage methods used -- either UTF16_BE, or something called PDFDocEncoding, which is defined in the PDF spec. The determination of which decoding method to use is done by examining the first two bytes for the byte order marker. """ raw = self.to_bytes() if raw[:2] == self.bytes_bom: return raw[2:].decode('utf-16-be') else: return raw.decode('pdfdocencoding') # Legacy-compatible interface decode = to_unicode # Internal value used by encoding escape_splitter = None # Calculated on first use @classmethod def init_escapes(cls): """ Initialize the escape_splitter for the encode method """ cls.escape_splitter = re.compile(br'(\(|\\|\))').split return cls.escape_splitter @classmethod def from_bytes(cls, raw, bytes_encoding='auto'): """ The from_bytes() constructor is called to encode a source raw byte string into a PdfString that is suitable for inclusion in a PDF. NOTE: There is no magic in the encoding process. A user can certainly do his own encoding, and simply initialize a PdfString() instance with his encoded string. That may be useful, for example, to add line breaks to make it easier to load PDFs into editors, or to not bother to escape balanced parentheses, or to escape additional characters to make a PDF more readable in a file editor. Those are features not currently supported by this method. from_bytes() can use a heuristic to figure out the best encoding for the string, or the user can control the process by changing the bytes_encoding parameter to 'literal' or 'hex' to force a particular conversion method. """ # If hexadecimal is not being forced, then figure out how long # the escaped literal string will be, and fall back to hex if # it is too long. force_hex = bytes_encoding == 'hex' if not force_hex: if bytes_encoding not in ('literal', 'auto'): raise ValueError('Invalid bytes_encoding value: %s' % bytes_encoding) splitlist = (cls.escape_splitter or cls.init_escapes())(raw) if bytes_encoding == 'auto' and len(splitlist) // 2 >= len(raw): force_hex = True if force_hex: # The spec does not mandate uppercase, # but it seems to be the convention. fmt = '<%s>' result = binascii.hexlify(raw).upper() else: fmt = '(%s)' splitlist[1::2] = [(b'\\' + x) for x in splitlist[1::2]] result = b''.join(splitlist) return cls(fmt % convert_load(result)) @classmethod def from_unicode(cls, source, text_encoding='auto', bytes_encoding='auto'): """ The from_unicode() constructor is called to encode a source string into a PdfString that is suitable for inclusion in a PDF. NOTE: There is no magic in the encoding process. A user can certainly do his own encoding, and simply initialize a PdfString() instance with his encoded string. That may be useful, for example, to add line breaks to make it easier to load PDFs into editors, or to not bother to escape balanced parentheses, or to escape additional characters to make a PDF more readable in a file editor. Those are features not supported by this method. from_unicode() can use a heuristic to figure out the best encoding for the string, or the user can control the process by changing the text_encoding parameter to 'pdfdocencoding' or 'utf16', and/or by changing the bytes_encoding parameter to 'literal' or 'hex' to force particular conversion methods. The function will raise an exception if it cannot perform the conversion as requested by the user. """ # Give preference to pdfdocencoding, since it only # requires one raw byte per character, rather than two. if text_encoding != 'utf16': force_pdfdoc = text_encoding == 'pdfdocencoding' if text_encoding != 'auto' and not force_pdfdoc: raise ValueError('Invalid text_encoding value: %s' % text_encoding) if source.startswith(cls.bad_pdfdoc_prefix): if force_pdfdoc: raise UnicodeError('Prefix of string %r cannot be encoded ' 'in pdfdocencoding' % source[:20]) else: try: raw = source.encode('pdfdocencoding') except UnicodeError: if force_pdfdoc: raise else: return cls.from_bytes(raw, bytes_encoding) # If the user is not forcing literal strings, # it makes much more sense to use hexadecimal with 2-byte chars raw = cls.bytes_bom + source.encode('utf-16-be') encoding = 'hex' if bytes_encoding == 'auto' else bytes_encoding return cls.from_bytes(raw, encoding) @classmethod def encode(cls, source, uni_type = type(u''), isinstance=isinstance): """ The encode() constructor is a legacy function that is also a convenience for the PdfWriter. """ if isinstance(source, uni_type): return cls.from_unicode(source) else: return cls.from_bytes(source)
null
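A minimal round-trip sketch for the class above (assuming pdfrw is installed; from_unicode/to_unicode are the entry points documented in the docstrings):

    from pdfrw.objects.pdfstring import PdfString

    # Encode a Python string into a PDF string object, then decode it back.
    # With the default 'auto' settings the class picks pdfdocencoding or
    # UTF-16BE (with a byte order mark), as described above.
    s = PdfString.from_unicode(u'Hello (PDF) world')
    assert s.startswith('(') or s.startswith('<')
    assert s.to_unicode() == u'Hello (PDF) world'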
724,128
pdfrw.objects.pdfstring
to_unicode
Decode a PDF string to a unicode string. This is a convenience function for user code, in that (as of pdfrw 0.3) it is never actually used inside pdfrw. There are two Unicode storage methods used -- either UTF16_BE, or something called PDFDocEncoding, which is defined in the PDF spec. The determination of which decoding method to use is done by examining the first two bytes for the byte order marker.
def to_unicode(self): """ Decode a PDF string to a unicode string. This is a convenience function for user code, in that (as of pdfrw 0.3) it is never actually used inside pdfrw. There are two Unicode storage methods used -- either UTF16_BE, or something called PDFDocEncoding, which is defined in the PDF spec. The determination of which decoding method to use is done by examining the first two bytes for the byte order marker. """ raw = self.to_bytes() if raw[:2] == self.bytes_bom: return raw[2:].decode('utf-16-be') else: return raw.decode('pdfdocencoding')
(self)
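The byte-order-mark dispatch described above can be exercised directly; a small sketch:

    from pdfrw.objects.pdfstring import PdfString

    # Forcing UTF-16 storage prepends the BOM, so to_unicode() decodes
    # the payload as utf-16-be ...
    utf16 = PdfString.from_unicode(u'\u20ac euro', text_encoding='utf16')
    assert utf16.to_unicode() == u'\u20ac euro'
    # ... while plain ASCII round-trips through pdfdocencoding.
    assert PdfString.from_unicode(u'plain').to_unicode() == u'plain'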
724,129
pdfrw.objects.pdfstring
decode_hex
Decode a PDF hexadecimal-encoded string, which is enclosed in angle brackets <>.
def decode_hex(self): """ Decode a PDF hexadecimal-encoded string, which is enclosed in angle brackets <>. """ hexstr = convert_store(''.join(self[1:-1].split())) if len(hexstr) % 2: # odd number of chars indicates a truncated 0 hexstr += b'0' if isinstance(hexstr, bytes) else '0' return binascii.unhexlify(hexstr)
(self)
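For example (a sketch of the decoding rules the method implements):

    from pdfrw.objects.pdfstring import PdfString

    # Whitespace inside the angle brackets is ignored.
    assert PdfString('<48 65 6C 6C 6F>').decode_hex() == b'Hello'
    # An odd digit count indicates a truncated trailing zero, so
    # '<486>' is read as if it were '<4860>'.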
724,130
pdfrw.objects.pdfstring
decode_literal
Decode a PDF literal string, which is enclosed in parentheses () Many pdfrw users never decode strings, so defer creating data structures to do so until the first string is decoded. Possible string escapes from the spec: (PDF 1.7 Reference, section 3.2.3, page 53) 1. \[nrtbf\()]: simple escapes 2. \d{1,3}: octal. Must be zero-padded to 3 digits if followed by digit 3. \<end of line>: line continuation. We don't know the EOL marker used in the PDF, so accept \r, \n, and \r\n. 4. Any other character following \ escape -- the backslash is swallowed.
def decode_literal(self): """ Decode a PDF literal string, which is enclosed in parentheses () Many pdfrw users never decode strings, so defer creating data structures to do so until the first string is decoded. Possible string escapes from the spec: (PDF 1.7 Reference, section 3.2.3, page 53) 1. \[nrtbf\()]: simple escapes 2. \\d{1,3}: octal. Must be zero-padded to 3 digits if followed by digit 3. \<end of line>: line continuation. We don't know the EOL marker used in the PDF, so accept \r, \n, and \r\n. 4. Any other character following \ escape -- the backslash is swallowed. """ result = (self.unescape_func or self.init_unescapes())(self[1:-1]) if len(result) == 1: return convert_store(result[0]) unescape_dict = self.unescape_dict result[1::2] = [unescape_dict[x] for x in result[1::2]] return convert_store(''.join(result))
(self)
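The escape rules enumerated above, as a quick sketch:

    from pdfrw.objects.pdfstring import PdfString

    assert PdfString(r'(line\nbreak)').decode_literal() == b'line\nbreak'  # simple escape
    assert PdfString(r'(\101\102\103)').decode_literal() == b'ABC'         # octal escapes
    assert PdfString(r'(a\zb)').decode_literal() == b'azb'                 # backslash swallowed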
724,131
pdfrw.objects.pdfstring
to_bytes
Decode a PDF string to bytes. This is a convenience function for user code, in that (as of pdfrw 0.3) it is never actually used inside pdfrw.
def to_bytes(self): """ Decode a PDF string to bytes. This is a convenience function for user code, in that (as of pdfrw 0.3) it is never actually used inside pdfrw. """ if self.startswith('(') and self.endswith(')'): return self.decode_literal() elif self.startswith('<') and self.endswith('>'): return self.decode_hex() else: raise ValueError('Invalid PDF string "%s"' % repr(self))
(self)
724,133
pdfrw.tokens
PdfTokens
null
class PdfTokens(object): # Table 3.1, page 50 of reference, defines whitespace eol = '\n\r' whitespace = '\x00 \t\f' + eol # Text on page 50 defines delimiter characters # Escape the ] delimiters = r'()<>{}[\]/%' # "normal" stuff is all but delimiters or whitespace. p_normal = r'(?:[^\\%s%s]+|\\[^%s])+' % (whitespace, delimiters, whitespace) p_comment = r'\%%[^%s]*' % eol # This will get the bulk of literal strings. p_literal_string = r'\((?:[^\\()]+|\\.)*[()]?' # This will get more pieces of literal strings # (Don't ask me why, but it hangs without the trailing ?.) p_literal_string_extend = r'(?:[^\\()]+|\\.)*[()]?' # A hex string. This one's easy. p_hex_string = r'\<[%s0-9A-Fa-f]*\>' % whitespace p_dictdelim = r'\<\<|\>\>' p_name = r'/[^%s%s]*' % (delimiters, whitespace) p_catchall = '[^%s]' % whitespace pattern = '|'.join([p_normal, p_name, p_hex_string, p_dictdelim, p_literal_string, p_comment, p_catchall]) findtok = re.compile('(%s)[%s]*' % (pattern, whitespace), re.DOTALL).finditer findparen = re.compile('(%s)[%s]*' % (p_literal_string_extend, whitespace), re.DOTALL).finditer def _gettoks(self, startloc, intern=intern, delimiters=delimiters, findtok=findtok, findparen=findparen, PdfString=PdfString, PdfObject=PdfObject, BasePdfName=BasePdfName): ''' Given a source data string and a location inside it, gettoks generates tokens. Each token is a tuple of the form: <starting file loc>, <ending file loc>, <token string> The ending file loc is past any trailing whitespace. The main complication here is the literal strings, which can contain nested parentheses. In order to cope with these we can discard the current iterator and loop back to the top to get a fresh one. We could use re.search instead of re.finditer, but that's slower. ''' fdata = self.fdata current = self.current = [(startloc, startloc)] cache = {} get_cache = cache.get while 1: for match in findtok(fdata, current[0][1]): current[0] = tokspan = match.span() token = match.group(1) firstch = token[0] toktype = intern if firstch not in delimiters: toktype = PdfObject elif firstch in '/<(%': if firstch == '/': # PDF Name toktype = BasePdfName elif firstch == '<': # << dict delim, or < hex string > if token[1:2] != '<': toktype = PdfString elif firstch == '(': # Literal string # It's probably simple, but maybe not # Nested parentheses are a bear, and if # they are present, we exit the for loop # and get back in with a new starting location. ends = None # For broken strings if fdata[match.end(1) - 1] != ')': nest = 2 m_start, loc = tokspan for match in findparen(fdata, loc): loc = match.end(1) ending = fdata[loc - 1] == ')' nest += 1 - ending * 2 if not nest: break if ending and ends is None: ends = loc, match.end(), nest token = fdata[m_start:loc] current[0] = m_start, match.end() if nest: # There is one possible recoverable error # seen in the wild -- some stupid generators # don't escape (. If this happens, just # terminate on first unescaped ). The string # won't be quite right, but that's a science # fair project for another time. (self.error, self.exception)[not ends]( 'Unterminated literal string') loc, ends, nest = ends token = fdata[m_start:loc] + ')' * nest current[0] = m_start, ends toktype = PdfString elif firstch == '%': # Comment if self.strip_comments: continue else: self.exception(('Tokenizer logic incorrect -- ' 'should never get here')) newtok = get_cache(token) if newtok is None: newtok = cache[token] = toktype(token) yield newtok if current[0] is not tokspan: break else: if self.strip_comments: break return def __init__(self, fdata, startloc=0, strip_comments=True, verbose=True): self.fdata = fdata self.strip_comments = strip_comments self.iterator = iterator = self._gettoks(startloc) self.msgs_dumped = None if verbose else set() self.next = getattr(iterator, nextattr) self.current = [(startloc, startloc)] def setstart(self, startloc): ''' Change the starting location. ''' current = self.current if startloc != current[0][1]: current[0] = startloc, startloc def floc(self): ''' Return the current file position (where the next token will be retrieved) ''' return self.current[0][1] floc = property(floc, setstart) def tokstart(self): ''' Return the file position of the most recently retrieved token. ''' return self.current[0][0] tokstart = property(tokstart, setstart) def __iter__(self): return self.iterator def multiple(self, count, islice=itertools.islice, list=list): ''' Retrieve multiple tokens ''' return list(islice(self, count)) def next_default(self, default='nope'): for result in self: return result return default def msg(self, msg, *arg): dumped = self.msgs_dumped if dumped is not None: if msg in dumped: return dumped.add(msg) if arg: msg %= arg fdata = self.fdata begin, end = self.current[0] if begin >= len(fdata): return '%s (filepos %s past EOF %s)' % (msg, begin, len(fdata)) line, col = linepos(fdata, begin) if end > begin: tok = fdata[begin:end].rstrip() if len(tok) > 30: tok = tok[:26] + ' ...' return ('%s (line=%d, col=%d, token=%s)' % (msg, line, col, repr(tok))) return '%s (line=%d, col=%d)' % (msg, line, col) def warning(self, *arg): s = self.msg(*arg) if s: log.warning(s) def error(self, *arg): s = self.msg(*arg) if s: log.error(s) def exception(self, *arg): raise PdfParseError(self.msg(*arg))
(fdata, startloc=0, strip_comments=True, verbose=True)
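A usage sketch for the tokenizer (the printed list is illustrative):

    from pdfrw.tokens import PdfTokens

    # Comments are stripped by default; a literal string with nested
    # parentheses comes back as a single token.
    source = '<< /Type /Page >> % comment\n(some (nested) string)'
    print(list(PdfTokens(source)))
    # ['<<', '/Type', '/Page', '>>', '(some (nested) string)']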
724,134
pdfrw.tokens
__init__
null
def __init__(self, fdata, startloc=0, strip_comments=True, verbose=True): self.fdata = fdata self.strip_comments = strip_comments self.iterator = iterator = self._gettoks(startloc) self.msgs_dumped = None if verbose else set() self.next = getattr(iterator, nextattr) self.current = [(startloc, startloc)]
(self, fdata, startloc=0, strip_comments=True, verbose=True)
724,135
pdfrw.tokens
__iter__
null
def __iter__(self): return self.iterator
(self)
724,136
pdfrw.tokens
_gettoks
Given a source data string and a location inside it, gettoks generates tokens. Each token is a tuple of the form: <starting file loc>, <ending file loc>, <token string> The ending file loc is past any trailing whitespace. The main complication here is the literal strings, which can contain nested parentheses. In order to cope with these we can discard the current iterator and loop back to the top to get a fresh one. We could use re.search instead of re.finditer, but that's slower.
def _gettoks(self, startloc, intern=intern, delimiters=delimiters, findtok=findtok, findparen=findparen, PdfString=PdfString, PdfObject=PdfObject, BasePdfName=BasePdfName): ''' Given a source data string and a location inside it, gettoks generates tokens. Each token is a tuple of the form: <starting file loc>, <ending file loc>, <token string> The ending file loc is past any trailing whitespace. The main complication here is the literal strings, which can contain nested parentheses. In order to cope with these we can discard the current iterator and loop back to the top to get a fresh one. We could use re.search instead of re.finditer, but that's slower. ''' fdata = self.fdata current = self.current = [(startloc, startloc)] cache = {} get_cache = cache.get while 1: for match in findtok(fdata, current[0][1]): current[0] = tokspan = match.span() token = match.group(1) firstch = token[0] toktype = intern if firstch not in delimiters: toktype = PdfObject elif firstch in '/<(%': if firstch == '/': # PDF Name toktype = BasePdfName elif firstch == '<': # << dict delim, or < hex string > if token[1:2] != '<': toktype = PdfString elif firstch == '(': # Literal string # It's probably simple, but maybe not # Nested parentheses are a bear, and if # they are present, we exit the for loop # and get back in with a new starting location. ends = None # For broken strings if fdata[match.end(1) - 1] != ')': nest = 2 m_start, loc = tokspan for match in findparen(fdata, loc): loc = match.end(1) ending = fdata[loc - 1] == ')' nest += 1 - ending * 2 if not nest: break if ending and ends is None: ends = loc, match.end(), nest token = fdata[m_start:loc] current[0] = m_start, match.end() if nest: # There is one possible recoverable error # seen in the wild -- some stupid generators # don't escape (. If this happens, just # terminate on first unescaped ). The string # won't be quite right, but that's a science # fair project for another time. (self.error, self.exception)[not ends]( 'Unterminated literal string') loc, ends, nest = ends token = fdata[m_start:loc] + ')' * nest current[0] = m_start, ends toktype = PdfString elif firstch == '%': # Comment if self.strip_comments: continue else: self.exception(('Tokenizer logic incorrect -- ' 'should never get here')) newtok = get_cache(token) if newtok is None: newtok = cache[token] = toktype(token) yield newtok if current[0] is not tokspan: break else: if self.strip_comments: break return
(self, startloc, intern=<built-in function intern>, delimiters='()<>{}[\\]/%', findtok=<built-in method finditer of re.Pattern object at 0x55b77025b280>, findparen=<built-in method finditer of re.Pattern object at 0x7ff9a4a59fc0>, PdfString=<class 'pdfrw.objects.pdfstring.PdfString'>, PdfObject=<class 'pdfrw.objects.pdfobject.PdfObject'>, BasePdfName=<class 'pdfrw.objects.pdfname.BasePdfName'>)
724,137
pdfrw.tokens
error
null
def error(self, *arg): s = self.msg(*arg) if s: log.error(s)
(self, *arg)
724,138
pdfrw.tokens
exception
null
def exception(self, *arg): raise PdfParseError(self.msg(*arg))
(self, *arg)
724,139
pdfrw.tokens
msg
null
def msg(self, msg, *arg): dumped = self.msgs_dumped if dumped is not None: if msg in dumped: return dumped.add(msg) if arg: msg %= arg fdata = self.fdata begin, end = self.current[0] if begin >= len(fdata): return '%s (filepos %s past EOF %s)' % (msg, begin, len(fdata)) line, col = linepos(fdata, begin) if end > begin: tok = fdata[begin:end].rstrip() if len(tok) > 30: tok = tok[:26] + ' ...' return ('%s (line=%d, col=%d, token=%s)' % (msg, line, col, repr(tok))) return '%s (line=%d, col=%d)' % (msg, line, col)
(self, msg, *arg)
724,140
pdfrw.tokens
multiple
Retrieve multiple tokens
def multiple(self, count, islice=itertools.islice, list=list): ''' Retrieve multiple tokens ''' return list(islice(self, count))
(self, count, islice=<class 'itertools.islice'>, list=<class 'list'>)
724,141
pdfrw.tokens
next_default
null
def next_default(self, default='nope'): for result in self: return result return default
(self, default='nope')
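multiple() and next_default() compose with ordinary iteration, roughly like this:

    from pdfrw.tokens import PdfTokens

    tokens = PdfTokens('1 0 obj << >> endobj')
    assert tokens.multiple(2) == ['1', '0']   # islice pulls two tokens
    assert tokens.next_default() == 'obj'
    # An exhausted tokenizer returns the default instead of raising:
    assert PdfTokens('').next_default() == 'nope'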
724,142
pdfrw.tokens
setstart
Change the starting location.
def setstart(self, startloc): ''' Change the starting location. ''' current = self.current if startloc != current[0][1]: current[0] = startloc, startloc
(self, startloc)
724,143
pdfrw.tokens
warning
null
def warning(self, *arg): s = self.msg(*arg) if s: log.warning(s)
(self, *arg)
724,164
asyncmock
AsyncCallableMixin
null
class AsyncCallableMixin(CallableMixin): def __init__(_mock_self, not_async=False, *args, **kwargs): super().__init__(*args, **kwargs) _mock_self.not_async = not_async _mock_self.aenter_return_value = _mock_self def __call__(_mock_self, *args, **kwargs): # can't use self in-case a function / method we are mocking uses self # in the signature if _mock_self.not_async: _mock_self._mock_check_sig(*args, **kwargs) return _mock_self._mock_call(*args, **kwargs) else: async def wrapper(): _mock_self._mock_check_sig(*args, **kwargs) return _mock_self._mock_call(*args, **kwargs) return wrapper() async def __aenter__(_mock_self): return _mock_self.aenter_return_value async def __aexit__(_mock_self, exc_type, exc_val, exc_tb): pass
(not_async=False, *args, **kwargs)
724,165
asyncmock
__aenter__
null
async def __aenter__(_mock_self): return _mock_self.aenter_return_value
(_mock_self)
724,168
asyncmock
__init__
null
def __init__(_mock_self, not_async=False, *args, **kwargs): super().__init__(*args, **kwargs) _mock_self.not_async = not_async _mock_self.aenter_return_value = _mock_self
(_mock_self, not_async=False, *args, **kwargs)
724,173
asyncmock
AsyncMock
Create a new `AsyncMock` object. `AsyncMock` has several options that extend the behaviour of the basic `Mock` object: * `not_async`: A boolean flag that makes the mock non-awaitable. If this flag is set, calling the mock does not return a coroutine and the mock reverts to the default behaviour of a `Mock` instance. All other arguments are passed directly through to the underlying `Mock` object.
class AsyncMock(AsyncCallableMixin, NonCallableMock): """ Create a new `AsyncMock` object. `AsyncMock` has several options that extend the behaviour of the basic `Mock` object: * `not_async`: A boolean flag that makes the mock non-awaitable. If this flag is set, calling the mock does not return a coroutine and the mock reverts to the default behaviour of a `Mock` instance. All other arguments are passed directly through to the underlying `Mock` object. """
(spec=None, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs)
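A usage sketch under the behaviour documented above (calls return a coroutine unless not_async is set):

    import asyncio
    from asyncmock import AsyncMock

    mock = AsyncMock(return_value=42)

    async def main():
        # The call itself returns a coroutine, so it must be awaited.
        return await mock(1, 2)

    assert asyncio.run(main()) == 42
    # With not_async=True the mock is called like a plain Mock.
    assert AsyncMock(not_async=True, return_value=7)() == 7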
724,211
mock.mock
CallableMixin
null
class CallableMixin(Base): def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs): self.__dict__['_mock_return_value'] = return_value _safe_super(CallableMixin, self).__init__( spec, wraps, name, spec_set, parent, _spec_state, _new_name, _new_parent, **kwargs ) self.side_effect = side_effect def _mock_check_sig(self, *args, **kwargs): # stub method that can be replaced with one with a specific signature pass def __call__(_mock_self, *args, **kwargs): # can't use self in-case a function / method we are mocking uses self # in the signature _mock_self._mock_check_sig(*args, **kwargs) _mock_self._increment_mock_call(*args, **kwargs) return _mock_self._mock_call(*args, **kwargs) def _mock_call(_mock_self, *args, **kwargs): return _mock_self._execute_mock_call(*args, **kwargs) def _increment_mock_call(_mock_self, *args, **kwargs): self = _mock_self self.called = True self.call_count += 1 # handle call_args # needs to be set here so assertions on call arguments pass before # execution in the case of awaited calls _call = _Call((args, kwargs), two=True) self.call_args = _call self.call_args_list.append(_call) # initial stuff for method_calls: do_method_calls = self._mock_parent is not None method_call_name = self._mock_name # initial stuff for mock_calls: mock_call_name = self._mock_new_name is_a_call = mock_call_name == '()' self.mock_calls.append(_Call(('', args, kwargs))) # follow up the chain of mocks: _new_parent = self._mock_new_parent while _new_parent is not None: # handle method_calls: if do_method_calls: _new_parent.method_calls.append(_Call((method_call_name, args, kwargs))) do_method_calls = _new_parent._mock_parent is not None if do_method_calls: method_call_name = _new_parent._mock_name + '.' + method_call_name # handle mock_calls: this_mock_call = _Call((mock_call_name, args, kwargs)) _new_parent.mock_calls.append(this_mock_call) if _new_parent._mock_new_name: if is_a_call: dot = '' else: dot = '.' is_a_call = _new_parent._mock_new_name == '()' mock_call_name = _new_parent._mock_new_name + dot + mock_call_name # follow the parental chain: _new_parent = _new_parent._mock_new_parent def _execute_mock_call(_mock_self, *args, **kwargs): self = _mock_self # separate from _increment_mock_call so that awaited functions are # executed separately from their call, also AsyncMock overrides this method effect = self.side_effect if effect is not None: if _is_exception(effect): raise effect elif not _callable(effect): result = next(effect) if _is_exception(result): raise result else: result = effect(*args, **kwargs) if result is not DEFAULT: return result if self._mock_return_value is not DEFAULT: return self.return_value if self._mock_wraps is not None: return self._mock_wraps(*args, **kwargs) return self.return_value
(spec=None, side_effect=None, return_value=sentinel.DEFAULT, wraps=None, name=None, spec_set=None, parent=None, _spec_state=None, _new_name='', _new_parent=None, **kwargs)
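The precedence implemented by _execute_mock_call (a side_effect result wins unless it is the DEFAULT sentinel, then return_value, then wraps) is easy to see with the stdlib equivalent:

    from unittest.mock import Mock, DEFAULT

    m = Mock(return_value='rv', side_effect=lambda: DEFAULT)
    assert m() == 'rv'              # DEFAULT falls through to return_value
    m.side_effect = lambda: 'se'
    assert m() == 'se'              # a real side_effect result wins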
724,437
flask_gzip
Gzip
null
class Gzip(object): def __init__(self, app, compress_level=6, minimum_size=500): self.app = app self.compress_level = compress_level self.minimum_size = minimum_size self.app.after_request(self.after_request) def after_request(self, response): accept_encoding = request.headers.get('Accept-Encoding', '') if response.status_code < 200 or \ response.status_code >= 300 or \ response.direct_passthrough or \ len(response.get_data()) < self.minimum_size or \ 'gzip' not in accept_encoding.lower() or \ 'Content-Encoding' in response.headers: return response gzip_buffer = BytesIO() gzip_file = gzip.GzipFile(mode='wb', compresslevel=self.compress_level, fileobj=gzip_buffer) gzip_file.write(response.get_data()) gzip_file.close() response.set_data(gzip_buffer.getvalue()) response.headers['Content-Encoding'] = 'gzip' response.headers['Content-Length'] = len(response.get_data()) return response
(app, compress_level=6, minimum_size=500)
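Hooking the middleware into an app is a one-liner; a sketch:

    from flask import Flask
    from flask_gzip import Gzip

    app = Flask(__name__)
    # Responses of 500+ bytes are gzipped when the client advertises
    # 'gzip' in Accept-Encoding and no Content-Encoding is set yet.
    Gzip(app, compress_level=6, minimum_size=500)

    @app.route('/')
    def index():
        return 'x' * 1000  # large enough to trigger compression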
724,438
flask_gzip
__init__
null
def __init__(self, app, compress_level=6, minimum_size=500): self.app = app self.compress_level = compress_level self.minimum_size = minimum_size self.app.after_request(self.after_request)
(self, app, compress_level=6, minimum_size=500)
724,439
flask_gzip
after_request
null
def after_request(self, response): accept_encoding = request.headers.get('Accept-Encoding', '') if response.status_code < 200 or \ response.status_code >= 300 or \ response.direct_passthrough or \ len(response.get_data()) < self.minimum_size or \ 'gzip' not in accept_encoding.lower() or \ 'Content-Encoding' in response.headers: return response gzip_buffer = BytesIO() gzip_file = gzip.GzipFile(mode='wb', compresslevel=self.compress_level, fileobj=gzip_buffer) gzip_file.write(response.get_data()) gzip_file.close() response.set_data(gzip_buffer.getvalue()) response.headers['Content-Encoding'] = 'gzip' response.headers['Content-Length'] = len(response.get_data()) return response
(self, response)
724,441
hypertion.main
HyperFunction
Handles the creation of a schema for LLM function calling, as well as the validation and invocation of functions based on the provided signature or metadata.
class HyperFunction: """ Handles the creation of a schema for LLM function calling, as well as the validation and invocation of functions based on the provided signature or metadata. """ def __init__(self) -> None: self._registered_functions: dict[str, info.FunctionInfo] = {} """Registered functions.""" def takeover(self, description: str | None = None): """Register the function by decorating it to generate function schema.""" def __wrapper__(func: Callable[..., Any]): _description = description or func.__doc__ if _description is None: raise RuntimeError(f"No description found for {func.__name__!r}") _description = '\n'.join( line.strip() for line in _description.split('\n') ) self._registered_functions[func.__name__] = info.FunctionInfo( memloc=func, description=_description ) return func return __wrapper__ @staticmethod def criteria( default: Any | None = None, *, description: str ): """Adding criteria to parameters.""" return info.CriteriaInfo(description=description, default=default) def _construct_mappings(self): """Construct schema mappings of registered functions.""" for f_name, f_info in self._registered_functions.items(): signature = inspect.signature(f_info.memloc) properties, required = {}, [] for name, instance in signature.parameters.items(): criteria = instance.default if not isinstance(criteria, info.CriteriaInfo): raise TypeError( f"Parameters of registered functions must be initialized with `criteria` method." ) default, annotation = criteria.default, instance.annotation if default and not isinstance(default, annotation): raise TypeError( f"{name!r} parameter is type-hinted as {annotation.__name__!r} but default value is of type {type(default).__name__!r}" ) elif not default: required.append(name) properties[name] = helpers.construct_property_map( annotation=annotation, description=criteria.description ) parameter_map = {} if properties: parameter_map = {"type": "object", "properties": properties} if required: parameter_map['required'] = required yield {'name': f_name, 'description': f_info.description} | {'parameters': parameter_map} def attach_hyperfunction(self, __obj: "HyperFunction"): """Attach new `HyperFunction` instance in the current instance""" self._registered_functions.update(__obj._registered_functions) @property def as_openai_functions(self) -> list[dict[str, Any]]: """Return GPT based function schema.""" return list(self._construct_mappings()) @property def as_open_functions(self) -> list[dict[str, Any]]: """Return Gorilla based function schema.""" return [{'api_call': _['name']} | _ for _ in self._construct_mappings()] def invoke(self, __signature_or_metadata: types.Signature | types.Metadata): """Validate and invoke the function from signature or metadata.""" function = __signature_or_metadata if isinstance(function, types.Signature): function = function.as_metadata() function_info = self._registered_functions.get(function.name) if function_info is None: raise LookupError(f"{function.name!r} not found in registered functions.") signature, forged_kwargs = inspect.signature(function_info.memloc), {} for param_name, param in signature.parameters.items(): criteria = param.default if param_name not in function.arguments and criteria.default is None: raise KeyError( f"{function.name!r} function required parameter {param_name!r} missing." ) forged_kwargs[param_name] = helpers.forge_parameter( annotation=param.annotation, value=function.arguments.get(param_name, criteria.default) ) return function_info.memloc(**forged_kwargs)
() -> None
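A registration sketch (assuming HyperFunction is re-exported at the package top level):

    from hypertion import HyperFunction

    hyperfunction = HyperFunction()

    @hyperfunction.takeover(description="Get the weather for a city.")
    def get_weather(
        city: str = hyperfunction.criteria(description="Name of the city"),
    ):
        return f"Sunny in {city}"

    # GPT-style schema built by _construct_mappings:
    print(hyperfunction.as_openai_functions)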
724,442
hypertion.main
__init__
null
def __init__(self) -> None: self._registered_functions: dict[str, info.FunctionInfo] = {} """Registered functions."""
(self) -> NoneType
724,443
hypertion.main
_construct_mappings
Construct schema mappings of registered functions.
def _construct_mappings(self): """Construct schema mappings of registered functions.""" for f_name, f_info in self._registered_functions.items(): signature = inspect.signature(f_info.memloc) properties, required = {}, [] for name, instance in signature.parameters.items(): criteria = instance.default if not isinstance(criteria, info.CriteriaInfo): raise TypeError( f"Parameters of registered functions must be initialized with `criteria` method." ) default, annotation = criteria.default, instance.annotation if default and not isinstance(default, annotation): raise TypeError( f"{name!r} parameter is type-hinted as {annotation.__name__!r} but default value is of type {type(default).__name__!r}" ) elif not default: required.append(name) properties[name] = helpers.construct_property_map( annotation=annotation, description=criteria.description ) parameter_map = {} if properties: parameter_map = {"type": "object", "properties": properties} if required: parameter_map['required'] = required yield {'name': f_name, 'description': f_info.description} | {'parameters': parameter_map}
(self)
724,444
hypertion.main
attach_hyperfunction
Attach new `HyperFunction` instance in the current instance
def attach_hyperfunction(self, __obj: "HyperFunction"): """Attach new `HyperFunction` instance in the current instance""" self._registered_functions.update(__obj._registered_functions)
(self, _HyperFunction__obj: hypertion.main.HyperFunction)
724,445
hypertion.main
criteria
Adding criteria to parameters.
@staticmethod def criteria( default: Any | None = None, *, description: str ): """Adding criteria to parameters.""" return info.CriteriaInfo(description=description, default=default)
(default: Optional[Any] = None, *, description: str)
724,446
hypertion.main
invoke
Validate and invoke the function from signature or metadata.
def invoke(self, __signature_or_metadata: types.Signature | types.Metadata): """Validate and invoke the function from signature or metadata.""" function = __signature_or_metadata if isinstance(function, types.Signature): function = function.as_metadata() function_info = self._registered_functions.get(function.name) if function_info is None: raise LookupError(f"{function.name!r} not found in registered functions.") signature, forged_kwargs = inspect.signature(function_info.memloc), {} for param_name, param in signature.parameters.items(): criteria = param.default if param_name not in function.arguments and criteria.default is None: raise KeyError( f"{function.name!r} function required parameter {param_name!r} missing." ) forged_kwargs[param_name] = helpers.forge_parameter( annotation=param.annotation, value=function.arguments.get(param_name, criteria.default) ) return function_info.memloc(**forged_kwargs)
(self, _HyperFunction__signature_or_metadata: hypertion.types.Signature | hypertion.types.Metadata)
724,447
hypertion.main
takeover
Register the function by decorating it to generate function schema.
def takeover(self, description: str | None = None): """Register the function by decorating it to generate function schema.""" def __wrapper__(func: Callable[..., Any]): _description = description or func.__doc__ if _description is None: raise RuntimeError(f"No description found for {func.__name__!r}") _description = '\n'.join( line.strip() for line in _description.split('\n') ) self._registered_functions[func.__name__] = info.FunctionInfo( memloc=func, description=_description ) return func return __wrapper__
(self, description: Optional[str] = None)
724,455
nylas.client
Client
API client for the Nylas API. Attributes: api_key: The Nylas API key to use for authentication api_uri: The URL to use for communicating with the Nylas API http_client: The HTTP client to use for requests to the Nylas API
class Client: """ API client for the Nylas API. Attributes: api_key: The Nylas API key to use for authentication api_uri: The URL to use for communicating with the Nylas API http_client: The HTTP client to use for requests to the Nylas API """ def __init__( self, api_key: str, api_uri: str = DEFAULT_SERVER_URL, timeout: int = 90 ): """ Initialize the Nylas API client. Args: api_key: The Nylas API key to use for authentication api_uri: The URL to use for communicating with the Nylas API timeout: The timeout for requests to the Nylas API, in seconds """ self.api_key = api_key self.api_uri = api_uri self.http_client = HttpClient(self.api_uri, self.api_key, timeout) @property def auth(self) -> Auth: """ Access the Auth API. Returns: The Auth API. """ return Auth(self.http_client) @property def applications(self) -> Applications: """ Access the Applications API. Returns: The Applications API. """ return Applications(self.http_client) @property def attachments(self) -> Attachments: """ Access the Attachments API. Returns: The Attachments API. """ return Attachments(self.http_client) @property def connectors(self) -> Connectors: """ Access the Connectors API. Returns: The Connectors API. """ return Connectors(self.http_client) @property def calendars(self) -> Calendars: """ Access the Calendars API. Returns: The Calendars API. """ return Calendars(self.http_client) @property def contacts(self) -> Contacts: """ Access the Contacts API. Returns: The Contacts API. """ return Contacts(self.http_client) @property def drafts(self) -> Drafts: """ Access the Drafts API. Returns: The Drafts API. """ return Drafts(self.http_client) @property def events(self) -> Events: """ Access the Events API. Returns: The Events API. """ return Events(self.http_client) @property def folders(self) -> Folders: """ Access the Folders API. Returns: The Folders API. """ return Folders(self.http_client) @property def grants(self) -> Grants: """ Access the Grants API. Returns: The Grants API. """ return Grants(self.http_client) @property def messages(self) -> Messages: """ Access the Messages API. Returns: The Messages API. """ return Messages(self.http_client) @property def threads(self) -> Threads: """ Access the Threads API. Returns: The Threads API. """ return Threads(self.http_client) @property def webhooks(self) -> Webhooks: """ Access the Webhooks API. Returns: The Webhooks API. """ return Webhooks(self.http_client)
(api_key: str, api_uri: str = 'https://api.us.nylas.com', timeout: int = 90)
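A construction sketch (the key value is a placeholder):

    from nylas import Client

    nylas = Client(api_key="NYLAS_API_KEY")  # api_uri defaults to the US region

    # Each sub-API is a property sharing the same HttpClient:
    calendars_api = nylas.calendars
    messages_api = nylas.messages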
724,456
nylas.client
__init__
Initialize the Nylas API client. Args: api_key: The Nylas API key to use for authentication api_uri: The URL to use for communicating with the Nylas API timeout: The timeout for requests to the Nylas API, in seconds
def __init__( self, api_key: str, api_uri: str = DEFAULT_SERVER_URL, timeout: int = 90 ): """ Initialize the Nylas API client. Args: api_key: The Nylas API key to use for authentication api_uri: The URL to use for communicating with the Nylas API timeout: The timeout for requests to the Nylas API, in seconds """ self.api_key = api_key self.api_uri = api_uri self.http_client = HttpClient(self.api_uri, self.api_key, timeout)
(self, api_key: str, api_uri: str = 'https://api.us.nylas.com', timeout: int = 90)
724,464
json_logic
jsonLogic
null
def jsonLogic(tests, data=None): # You've recursed to a primitive, stop! if tests is None or type(tests) != dict: return tests data = data or {} op = list(tests.keys())[0] values = tests[op] operations = { "==" : (lambda a, b: a == b), "===" : (lambda a, b: a is b), "!=" : (lambda a, b: a != b), "!==" : (lambda a, b: a is not b), ">" : (lambda a, b: a > b), ">=" : (lambda a, b: a >= b), "<" : (lambda a, b, c=None: a < b if (c is None) else (a < b) and (b < c) ), "<=" : (lambda a, b, c=None: a <= b if (c is None) else (a <= b) and (b <= c) ), "!" : (lambda a: not a), "%" : (lambda a, b: a % b), "and" : (lambda *args: reduce(lambda total, arg: total and arg, args, True) ), "or" : (lambda *args: reduce(lambda total, arg: total or arg, args, False) ), "?:" : (lambda a, b, c: b if a else c), "log" : (lambda a: a if sys.stdout.write(str(a)) else a), "in" : (lambda a, b: a in b if "__contains__" in dir(b) else False ), "var" : (lambda a, not_found=None: reduce(lambda data, key: (data.get(key, not_found) if type(data) == dict else data[int(key)] if (type(data) in [list, tuple] and str(key).lstrip("-").isdigit()) else not_found), str(a).split("."), data) ), "cat" : (lambda *args: "".join(args) ), "+" : (lambda *args: reduce(lambda total, arg: total + float(arg), args, 0.0) ), "*" : (lambda *args: reduce(lambda total, arg: total * float(arg), args, 1.0) ), "-" : (lambda a, b=None: -a if b is None else a - b), "/" : (lambda a, b=None: a if b is None else float(a) / float(b)), "min" : (lambda *args: min(args)), "max" : (lambda *args: max(args)), "count": (lambda *args: sum(1 if a else 0 for a in args)), } if op not in operations: raise RuntimeError("Unrecognized operation %s" % op) # Easy syntax for unary operators, like {"var": "x"} instead of strict # {"var": ["x"]} if type(values) not in [list, tuple]: values = [values] # Recursion! values = map(lambda val: jsonLogic(val, data), values) return operations[op](*values)
(tests, data=None)
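An evaluation sketch using operators that do not rely on reduce (on Python 3 the reduce-based operators such as "and", "or" and "+" also need a module-level from functools import reduce):

    # {"var": ...} reads dotted paths out of the data dict; other
    # operators recurse on their argument lists.
    rule = {"?:": [{"<": [{"var": "temp"}, 100]}, "liquid", "gas"]}
    print(jsonLogic(rule, {"temp": 37}))          # 'liquid'
    print(jsonLogic({"cat": ["json", "Logic"]}))  # 'jsonLogic'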
724,466
tlo.tl_config
TlConfig
null
class TlConfig(TlBase): def __init__( self, ): self.types: List['TlType'] = [] self.id_to_type: Dict[int, 'TlType'] = {} # orig int32_t self.name_to_type: Dict[str, 'TlType'] = {} self.functions: List['TlCombinator'] = [] self.id_to_function: Dict[int, 'TlCombinator'] = {} # orig int32_t self.name_to_function: Dict[str, 'TlCombinator'] = {} def add_type(self, type_: 'TlType') -> None: self.types.append(type_) self.id_to_type[type_.id] = type_ self.name_to_type[type_.name] = type_ def get_type(self, type_id_or_name: Union[int, str]) -> 'TlType': # orig int32_t if isinstance(type_id_or_name, int): return self.id_to_type[type_id_or_name] else: return self.name_to_type[type_id_or_name] def add_function(self, function: 'TlCombinator') -> None: self.functions.append(function) self.id_to_function[function.id] = function self.name_to_function[function.name] = function def get_function(self, function_id_or_name: Union[int, str]) -> 'TlCombinator': # orig int32_t if isinstance(function_id_or_name, int): return self.id_to_function[function_id_or_name] else: return self.name_to_function[function_id_or_name] def get_type_count(self) -> int: # orig size_t return len(self.types) def get_type_by_num(self, num: int) -> 'TlType': # orig size_t return self.types[num] def get_function_count(self) -> int: # orig size_t return len(self.functions) def get_function_by_num(self, num: int) -> 'TlCombinator': # orig size_t return self.functions[num]
()
724,467
tlo.tl_config
__init__
null
def __init__( self, ): self.types: List['TlType'] = [] self.id_to_type: Dict[int, 'TlType'] = {} # orig int32_t self.name_to_type: Dict[str, 'TlType'] = {} self.functions: List['TlCombinator'] = [] self.id_to_function: Dict[int, 'TlCombinator'] = {} # orig int32_t self.name_to_function: Dict[str, 'TlCombinator'] = {}
(self)
724,468
tlo.tl_core
__repr__
null
def __repr__(self): return f'<{__name__}.{type(self).__name__}> {vars(self)}'
(self)
724,470
tlo.tl_config
add_function
null
def add_function(self, function: 'TlCombinator') -> None: self.functions.append(function) self.id_to_function[function.id] = function self.name_to_function[function.name] = function
(self, function: tlo.tl_core.TlCombinator) -> NoneType
724,471
tlo.tl_config
add_type
null
def add_type(self, type_: 'TlType') -> None: self.types.append(type_) self.id_to_type[type_.id] = type_ self.name_to_type[type_.name] = type_
(self, type_: tlo.tl_core.TlType) -> NoneType
724,472
tlo.tl_config
get_function
null
def get_function(self, function_id_or_name: Union[int, str]) -> 'TlCombinator': # orig int32_t if isinstance(function_id_or_name, int): return self.id_to_function[function_id_or_name] else: return self.name_to_function[function_id_or_name]
(self, function_id_or_name: Union[int, str]) -> tlo.tl_core.TlCombinator
724,473
tlo.tl_config
get_function_by_num
null
def get_function_by_num(self, num: int) -> 'TlCombinator': # orig size_t return self.functions[num]
(self, num: int) -> tlo.tl_core.TlCombinator
724,474
tlo.tl_config
get_function_count
null
def get_function_count(self) -> int: # orig size_t return len(self.functions)
(self) -> int
724,475
tlo.tl_config
get_type
null
def get_type(self, type_id_or_name: Union[int, str]) -> 'TlType': # orig int32_t if isinstance(type_id_or_name, int): return self.id_to_type[type_id_or_name] else: return self.name_to_type[type_id_or_name]
(self, type_id_or_name: Union[int, str]) -> tlo.tl_core.TlType
724,476
tlo.tl_config
get_type_by_num
null
def get_type_by_num(self, num: int) -> 'TlType': # orig size_t return self.types[num]
(self, num: int) -> tlo.tl_core.TlType
724,477
tlo.tl_config
get_type_count
null
def get_type_count(self) -> int: # orig size_t return len(self.types)
(self) -> int
724,478
tlo.tl_config_parser
TlConfigParser
null
class TlConfigParser(TlBase): def __init__(self, data: bytes): self.p = TlSimpleParser(data) self.schema_version = -1 self.config = TlConfig() def parse_config(self) -> 'TlConfig': self.schema_version = self.get_schema_version(self.try_parse_int()) if self.schema_version < 2: raise RuntimeError(f'Unsupported tl-schema version {self.schema_version}') self.try_parse_int() # date self.try_parse_int() # version types_n = self.try_parse_int() constructors_total = 0 for i in range(types_n): tl_type = self.read_type() self.config.add_type(tl_type) constructors_total += tl_type.constructors_num constructors_n = self.try_parse_int() assert constructors_n == constructors_total for i in range(constructors_n): tl_combinator = self.read_combinator() self.config.get_type(tl_combinator.type_id).add_constructor(tl_combinator) functions_n = self.try_parse_int() for i in range(functions_n): self.config.add_function(self.read_combinator()) self.p.fetch_end() self.try_parse(0) return self.config @staticmethod def get_schema_version(version_id: int) -> int: if version_id == TLS_SCHEMA_V4: return 4 elif version_id == TLS_SCHEMA_V3: return 3 elif version_id == TLS_SCHEMA_V2: return 2 return -1 def read_type(self): t = self.try_parse_int() if t != TLS_TYPE: raise RuntimeError(f'Wrong tls_type magic {t}') tl_type = TlType() tl_type.id = self.try_parse_int() tl_type.name = self.try_parse_string() tl_type.constructors_num = self.try_parse_int() # orig size_t tl_type.constructors = [] tl_type.flags = self.try_parse_int() tl_type.flags &= ~(1 | 8 | 16 | 1024) if tl_type.flags != 0: logger.warning(f'Type {tl_type.name} has non-zero flags: {tl_type.flags}') tl_type.arity = self.try_parse_int() self.try_parse_long() # unused return tl_type def read_combinator(self): t = self.try_parse_int() if t != TLS_COMBINATOR: raise RuntimeError(f'Wrong tls_combinator magic {t}') tl_combinator = TlCombinator() tl_combinator.id = self.try_parse_int() tl_combinator.name = self.try_parse_string() tl_combinator.type_id = self.try_parse_int() tl_combinator.var_count = 0 left_type = self.try_parse_int() if left_type == TLS_COMBINATOR_LEFT: tl_combinator.args = self.read_args_list(tl_combinator) else: if left_type != TLS_COMBINATOR_LEFT_BUILTIN: raise RuntimeError(f'Wrong tls_combinator_left magic {left_type}') right_ver = self.try_parse_int() if right_ver != TLS_COMBINATOR_RIGHT_V2: raise RuntimeError(f'Wrong tls_combinator_right magic {right_ver}') tl_combinator.result = self.read_type_expr(tl_combinator) return tl_combinator def read_num_const(self) -> TlTree: num = self.try_parse_int() return TlTreeNatConst(FLAG_NOVAR, num) def read_num_var(self, tl_combinator: TlCombinator) -> TlTree: diff = self.try_parse_int() var_num = self.try_parse_int() if var_num >= tl_combinator.var_count: tl_combinator.var_count = var_num + 1 return TlTreeVarNum(0, var_num, diff) def read_nat_expr(self, tl_combinator: TlCombinator) -> TlTree: tree_type = self.try_parse_int() if tree_type in (TLS_NAT_CONST_OLD, TLS_NAT_CONST): return self.read_num_const() elif tree_type == TLS_NAT_VAR: return self.read_num_var(tl_combinator) else: raise RuntimeError(f'tree_type = {tree_type}') def read_expr(self, tl_combinator: TlCombinator) -> TlTree: tree_type = self.try_parse_int() if tree_type == TLS_EXPR_NAT: return self.read_nat_expr(tl_combinator) elif tree_type == TLS_EXPR_TYPE: return self.read_type_expr(tl_combinator) else: raise RuntimeError(f'tree_type = {tree_type}') def read_args_list(self, tl_combinator: TlCombinator) -> List[Arg]: schema_flag_opt_field = 2 << int(self.schema_version >= 3) schema_flag_has_vars = schema_flag_opt_field ^ 6 args_num = self.try_parse_int() args_list = [] for i in range(args_num): arg = Arg() arg_v = self.try_parse_int() if arg_v != TLS_ARG_V2: raise RuntimeError(f'Wrong tls_arg magic {arg_v}') arg.name = self.try_parse_string() arg.flags = self.try_parse_int() is_optional = False if arg.flags & schema_flag_opt_field: arg.flags &= ~schema_flag_opt_field is_optional = True if arg.flags & schema_flag_has_vars: arg.flags &= ~schema_flag_has_vars arg.var_num = self.try_parse_int() else: arg.var_num = -1 if arg.var_num >= tl_combinator.var_count: tl_combinator.var_count = arg.var_num + 1 if is_optional: arg.exist_var_num = self.try_parse_int() arg.exist_var_bit = self.try_parse_int() else: arg.exist_var_num = -1 arg.exist_var_bit = 0 arg.type = self.read_type_expr(tl_combinator) if arg.type.flags & FLAG_NOVAR: arg.flags |= FLAG_NOVAR args_list.append(arg) return args_list def read_type_expr(self, tl_combinator: TlCombinator) -> TlTree: tree_type = self.try_parse_int() if tree_type == TLS_TYPE_VAR: return self.read_type_var(tl_combinator) elif tree_type == TLS_TYPE_EXPR: return self.read_type_tree(tl_combinator) elif tree_type == TLS_ARRAY: return self.read_array(tl_combinator) else: raise RuntimeError(f'tree_type = {tree_type}') def read_type_var(self, tl_combinator: TlCombinator) -> TlTree: var_num = self.try_parse_int() flags = self.try_parse_int() if var_num >= tl_combinator.var_count: tl_combinator.var_count = var_num + 1 assert not (flags & (FLAG_NOVAR | FLAG_BARE)) return TlTreeVarType(flags, var_num) def read_type_tree(self, tl_combinator: TlCombinator) -> TlTree: tl_type = self.config.get_type(self.try_parse_int()) # there is assert not needed because we have KeyError exception flags = self.try_parse_int() | FLAG_NOVAR arity = self.try_parse_int() assert tl_type.arity == arity tl_tree_type = TlTreeType(flags, tl_type, arity) for i in range(arity): child = self.read_expr(tl_combinator) tl_tree_type.children.append(child) if not (child.flags & FLAG_NOVAR): tl_tree_type.flags &= ~FLAG_NOVAR return tl_tree_type def read_array(self, tl_combinator: TlCombinator) -> TlTree: flags = FLAG_NOVAR multiplicity = self.read_nat_expr(tl_combinator) tl_tree_array = TlTreeArray(flags, multiplicity, self.read_args_list(tl_combinator)) for i in range(len(tl_tree_array.args)): if not (tl_tree_array.args[i].flags & FLAG_NOVAR): tl_tree_array.flags &= ~FLAG_NOVAR return tl_tree_array def try_parse(self, res): if self.p.get_error(): raise RuntimeError(f'Wrong TL-scheme specified: {self.p.get_error()} at {self.p.get_error_pos()}') return res def try_parse_int(self) -> int: # orig int32_t return self.try_parse(self.p.fetch_int()) def try_parse_long(self) -> int: # orig int64_t return self.try_parse(self.p.fetch_long()) def try_parse_string(self) -> str: return self.try_parse(self.p.fetch_string())
(data: bytes)
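Driving the parser over a compiled schema blob looks roughly like this (the file name is hypothetical):

    from tlo.tl_config_parser import TlConfigParser

    with open('td_api.tlo', 'rb') as f:
        config = TlConfigParser(f.read()).parse_config()

    print(config.get_type_count(), 'types,',
          config.get_function_count(), 'functions')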
724,479
tlo.tl_config_parser
__init__
null
def __init__(self, data: bytes): self.p = TlSimpleParser(data) self.schema_version = -1 self.config = TlConfig()
(self, data: bytes)
724,482
tlo.tl_config_parser
get_schema_version
null
@staticmethod def get_schema_version(version_id: int) -> int: if version_id == TLS_SCHEMA_V4: return 4 elif version_id == TLS_SCHEMA_V3: return 3 elif version_id == TLS_SCHEMA_V2: return 2 return -1
(version_id: int) -> int
724,483
tlo.tl_config_parser
parse_config
null
def parse_config(self) -> 'TlConfig': self.schema_version = self.get_schema_version(self.try_parse_int()) if self.schema_version < 2: raise RuntimeError(f'Unsupported tl-schema version {self.schema_version}') self.try_parse_int() # date self.try_parse_int() # version types_n = self.try_parse_int() constructors_total = 0 for i in range(types_n): tl_type = self.read_type() self.config.add_type(tl_type) constructors_total += tl_type.constructors_num constructors_n = self.try_parse_int() assert constructors_n == constructors_total for i in range(constructors_n): tl_combinator = self.read_combinator() self.config.get_type(tl_combinator.type_id).add_constructor(tl_combinator) functions_n = self.try_parse_int() for i in range(functions_n): self.config.add_function(self.read_combinator()) self.p.fetch_end() self.try_parse(0) return self.config
(self) -> tlo.tl_config.TlConfig
724,484
tlo.tl_config_parser
read_args_list
null
def read_args_list(self, tl_combinator: TlCombinator) -> List[Arg]: schema_flag_opt_field = 2 << int(self.schema_version >= 3) schema_flag_has_vars = schema_flag_opt_field ^ 6 args_num = self.try_parse_int() args_list = [] for i in range(args_num): arg = Arg() arg_v = self.try_parse_int() if arg_v != TLS_ARG_V2: raise RuntimeError(f'Wrong tls_arg magic {arg_v}') arg.name = self.try_parse_string() arg.flags = self.try_parse_int() is_optional = False if arg.flags & schema_flag_opt_field: arg.flags &= ~schema_flag_opt_field is_optional = True if arg.flags & schema_flag_has_vars: arg.flags &= ~schema_flag_has_vars arg.var_num = self.try_parse_int() else: arg.var_num = -1 if arg.var_num >= tl_combinator.var_count: tl_combinator.var_count = arg.var_num + 1 if is_optional: arg.exist_var_num = self.try_parse_int() arg.exist_var_bit = self.try_parse_int() else: arg.exist_var_num = -1 arg.exist_var_bit = 0 arg.type = self.read_type_expr(tl_combinator) if arg.type.flags & FLAG_NOVAR: arg.flags |= FLAG_NOVAR args_list.append(arg) return args_list
(self, tl_combinator: tlo.tl_core.TlCombinator) -> List[tlo.tl_core.Arg]
724,485
tlo.tl_config_parser
read_array
null
def read_array(self, tl_combinator: TlCombinator) -> TlTree: flags = FLAG_NOVAR multiplicity = self.read_nat_expr(tl_combinator) tl_tree_array = TlTreeArray(flags, multiplicity, self.read_args_list(tl_combinator)) for i in range(len(tl_tree_array.args)): if not (tl_tree_array.args[i].flags & FLAG_NOVAR): tl_tree_array.flags &= ~FLAG_NOVAR return tl_tree_array
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,486
tlo.tl_config_parser
read_combinator
null
def read_combinator(self): t = self.try_parse_int() if t != TLS_COMBINATOR: raise RuntimeError(f'Wrong tls_combinator magic {t}') tl_combinator = TlCombinator() tl_combinator.id = self.try_parse_int() tl_combinator.name = self.try_parse_string() tl_combinator.type_id = self.try_parse_int() tl_combinator.var_count = 0 left_type = self.try_parse_int() if left_type == TLS_COMBINATOR_LEFT: tl_combinator.args = self.read_args_list(tl_combinator) else: if left_type != TLS_COMBINATOR_LEFT_BUILTIN: raise RuntimeError(f'Wrong tls_combinator_left magic {left_type}') right_ver = self.try_parse_int() if right_ver != TLS_COMBINATOR_RIGHT_V2: raise RuntimeError(f'Wrong tls_combinator_right magic {right_ver}') tl_combinator.result = self.read_type_expr(tl_combinator) return tl_combinator
(self)
724,487
tlo.tl_config_parser
read_expr
null
def read_expr(self, tl_combinator: TlCombinator) -> TlTree:
    tree_type = self.try_parse_int()
    if tree_type == TLS_EXPR_NAT:
        return self.read_nat_expr(tl_combinator)
    elif tree_type == TLS_EXPR_TYPE:
        return self.read_type_expr(tl_combinator)
    else:
        raise RuntimeError(f'tree_type = {tree_type}')
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,488
tlo.tl_config_parser
read_nat_expr
null
def read_nat_expr(self, tl_combinator: TlCombinator) -> TlTree:
    tree_type = self.try_parse_int()
    if tree_type in (TLS_NAT_CONST_OLD, TLS_NAT_CONST):
        return self.read_num_const()
    elif tree_type == TLS_NAT_VAR:
        return self.read_num_var(tl_combinator)
    else:
        raise RuntimeError(f'tree_type = {tree_type}')
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,489
tlo.tl_config_parser
read_num_const
null
def read_num_const(self) -> TlTree:
    num = self.try_parse_int()
    return TlTreeNatConst(FLAG_NOVAR, num)
(self) -> tlo.tl_core.TlTree
724,490
tlo.tl_config_parser
read_num_var
null
def read_num_var(self, tl_combinator: TlCombinator) -> TlTree:
    diff = self.try_parse_int()
    var_num = self.try_parse_int()
    if var_num >= tl_combinator.var_count:
        tl_combinator.var_count = var_num + 1
    return TlTreeVarNum(0, var_num, diff)
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,491
tlo.tl_config_parser
read_type
null
def read_type(self):
    t = self.try_parse_int()
    if t != TLS_TYPE:
        raise RuntimeError(f'Wrong tls_type magic {t}')

    tl_type = TlType()
    tl_type.id = self.try_parse_int()
    tl_type.name = self.try_parse_string()
    tl_type.constructors_num = self.try_parse_int()  # orig size_t
    tl_type.constructors = []
    tl_type.flags = self.try_parse_int()
    tl_type.flags &= ~(1 | 8 | 16 | 1024)
    if tl_type.flags != 0:
        logger.warning(f'Type {tl_type.name} has non-zero flags: {tl_type.flags}')
    tl_type.arity = self.try_parse_int()
    self.try_parse_long()  # unused
    return tl_type
(self)
724,492
tlo.tl_config_parser
read_type_expr
null
def read_type_expr(self, tl_combinator: TlCombinator) -> TlTree:
    tree_type = self.try_parse_int()
    if tree_type == TLS_TYPE_VAR:
        return self.read_type_var(tl_combinator)
    elif tree_type == TLS_TYPE_EXPR:
        return self.read_type_tree(tl_combinator)
    elif tree_type == TLS_ARRAY:
        return self.read_array(tl_combinator)
    else:
        raise RuntimeError(f'tree_type = {tree_type}')
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,493
tlo.tl_config_parser
read_type_tree
null
def read_type_tree(self, tl_combinator: TlCombinator) -> TlTree:
    tl_type = self.config.get_type(self.try_parse_int())
    # No assert needed here: get_type raises KeyError for an unknown type id.
    flags = self.try_parse_int() | FLAG_NOVAR
    arity = self.try_parse_int()
    assert tl_type.arity == arity

    tl_tree_type = TlTreeType(flags, tl_type, arity)
    for i in range(arity):
        child = self.read_expr(tl_combinator)
        tl_tree_type.children.append(child)
        if not (child.flags & FLAG_NOVAR):
            tl_tree_type.flags &= ~FLAG_NOVAR
    return tl_tree_type
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,494
tlo.tl_config_parser
read_type_var
null
def read_type_var(self, tl_combinator: TlCombinator) -> TlTree:
    var_num = self.try_parse_int()
    flags = self.try_parse_int()
    if var_num >= tl_combinator.var_count:
        tl_combinator.var_count = var_num + 1
    assert not (flags & (FLAG_NOVAR | FLAG_BARE))
    return TlTreeVarType(flags, var_num)
(self, tl_combinator: tlo.tl_core.TlCombinator) -> tlo.tl_core.TlTree
724,495
tlo.tl_config_parser
try_parse
null
def try_parse(self, res):
    if self.p.get_error():
        raise RuntimeError(f'Wrong TL-scheme specified: {self.p.get_error()} at {self.p.get_error_pos()}')
    return res
(self, res)
724,496
tlo.tl_config_parser
try_parse_int
null
def try_parse_int(self) -> int:  # orig int32_t
    return self.try_parse(self.p.fetch_int())
(self) -> int
724,497
tlo.tl_config_parser
try_parse_long
null
def try_parse_long(self) -> int:  # orig int64_t
    return self.try_parse(self.p.fetch_long())
(self) -> int
724,498
tlo.tl_config_parser
try_parse_string
null
def try_parse_string(self) -> str:
    return self.try_parse(self.p.fetch_string())
(self) -> str
724,499
tlo
read_tl_config
null
def read_tl_config(data: bytes) -> TlConfig:
    if not data:
        raise RuntimeError('Config data is empty')
    if len(data) % struct.calcsize('i') != 0:
        raise RuntimeError(f'Config size = {len(data)} is not multiple of {struct.calcsize("i")}')

    parser = TlConfigParser(data)
    return parser.parse_config()
(data: bytes) -> tlo.tl_config.TlConfig
724,500
tlo
read_tl_config_from_file
null
def read_tl_config_from_file(file_name: str) -> TlConfig:
    with open(file_name, 'rb') as config:
        return read_tl_config(config.read())
(file_name: str) -> tlo.tl_config.TlConfig
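A hedged usage sketch of the two readers above; the file name is hypothetical, and both raise RuntimeError on empty input or input whose size is not a multiple of struct.calcsize('i') (typically 4 bytes):

# 'td_api.tlo' is a hypothetical serialized tl-schema file.
config = read_tl_config_from_file('td_api.tlo')
# config is a TlConfig populated with the parsed types, constructors
# and functions; read_tl_config(data) does the same from raw bytes.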
724,506
domain2idna.converter
Converter
Provides a base for every core logic we add. :param subject: The subject to convert. :type subject: str, list :param str original_encoding: The encoding to provide as output.
class Converter:
    """
    Provides a base for every core logic we add.

    :param subject: The subject to convert.
    :type subject: str, list

    :param str original_encoding: The encoding to provide as output.
    """

    to_ignore = [
        "0.0.0.0",
        "localhost",
        "127.0.0.1",
        "localdomain",
        "local",
        "broadcasthost",
        "allhosts",
        "allnodes",
        "allrouters",
        "localnet",
        "loopback",
        "mcastprefix",
    ]

    def __init__(self, subject, original_encoding="utf-8"):
        self.subject = subject
        self.encoding = original_encoding

    def convert_to_idna(self, subject, original_encoding="utf-8"):
        """
        Converts the given subject to IDNA.

        :param str subject: The subject to convert.

        :rtype: str
        """

        if subject in self.to_ignore:
            return subject

        if "://" not in subject:
            try:
                return subject.encode("idna").decode(original_encoding)
            except UnicodeError:  # pragma: no cover
                return subject

        if subject.startswith("://"):
            to_convert = urlparse(f"https://{subject[3:]}").netloc
            converted = self.convert_to_idna(to_convert)

            return subject.replace(to_convert, converted)

        parsed_url = urlparse(subject)
        result = f"{parsed_url.scheme}://{self.convert_to_idna(parsed_url.netloc)}{parsed_url.path}"

        if parsed_url.params:
            result += f";{parsed_url.params}"
        if parsed_url.query:
            result += f"?{parsed_url.query}"
        if parsed_url.fragment:
            result += f"#{parsed_url.fragment}"

        return result

    def __get_converted(self, subject):
        """
        Process the actual conversion.

        :param str subject: The subject to convert.

        :rtype: str
        """

        if (
            not subject
            or not subject.strip()
            or subject in self.to_ignore
            or subject.startswith("#")
        ):
            return subject

        if "#" in subject and "://" not in subject:
            comment = " " + subject[subject.find("#"):]
            subject = subject[: subject.find("#")].strip()
        else:
            comment = ""

        return (
            " ".join(
                [
                    self.convert_to_idna(x, original_encoding=self.encoding)
                    for x in subject.split()
                ]
            )
            + comment
        ).strip()

    def get_converted(self):
        """
        Provides the converted data.
        """

        if isinstance(self.subject, list):
            return [self.__get_converted(x) for x in self.subject]

        return self.__get_converted(self.subject)
(subject, original_encoding='utf-8')
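A hedged usage sketch of Converter; the domains are illustrative, and the expected output assumes the standard "idna" codec mapping (e.g. bücher → xn--bcher-kva):

converter = Converter(["bücher.example", "0.0.0.0 localhost"])
print(converter.get_converted())
# ['xn--bcher-kva.example', '0.0.0.0 localhost']
# entries from to_ignore pass through unchanged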
724,507
domain2idna.converter
__get_converted
Process the actual conversion. :param str subject: The subject to convert. :rtype: str
def __get_converted(self, subject):
    """
    Process the actual conversion.

    :param str subject: The subject to convert.

    :rtype: str
    """

    if (
        not subject
        or not subject.strip()
        or subject in self.to_ignore
        or subject.startswith("#")
    ):
        return subject

    if "#" in subject and "://" not in subject:
        comment = " " + subject[subject.find("#"):]
        subject = subject[: subject.find("#")].strip()
    else:
        comment = ""

    return (
        " ".join(
            [
                self.convert_to_idna(x, original_encoding=self.encoding)
                for x in subject.split()
            ]
        )
        + comment
    ).strip()
(self, subject)
724,508
domain2idna.converter
__init__
null
def __init__(self, subject, original_encoding="utf-8"):
    self.subject = subject
    self.encoding = original_encoding
(self, subject, original_encoding='utf-8')
724,509
domain2idna.converter
convert_to_idna
Converts the given subject to IDNA. :param str subject: The subject to convert. :rtype: str
def convert_to_idna(self, subject, original_encoding="utf-8"):
    """
    Converts the given subject to IDNA.

    :param str subject: The subject to convert.

    :rtype: str
    """

    if subject in self.to_ignore:
        return subject

    if "://" not in subject:
        try:
            return subject.encode("idna").decode(original_encoding)
        except UnicodeError:  # pragma: no cover
            return subject

    if subject.startswith("://"):
        to_convert = urlparse(f"https://{subject[3:]}").netloc
        converted = self.convert_to_idna(to_convert)

        return subject.replace(to_convert, converted)

    parsed_url = urlparse(subject)
    result = f"{parsed_url.scheme}://{self.convert_to_idna(parsed_url.netloc)}{parsed_url.path}"

    if parsed_url.params:
        result += f";{parsed_url.params}"
    if parsed_url.query:
        result += f"?{parsed_url.query}"
    if parsed_url.fragment:
        result += f"#{parsed_url.fragment}"

    return result
(self, subject, original_encoding='utf-8')
724,510
domain2idna.converter
get_converted
Provides the converted data.
def get_converted(self):
    """
    Provides the converted data.
    """

    if isinstance(self.subject, list):
        return [self.__get_converted(x) for x in self.subject]

    return self.__get_converted(self.subject)
(self)
724,512
domain2idna
domain2idna
Process the conversion of the given subject. :param subject: The subject to convert. :type subject: str, list :param str encoding: The encoding to provide. :rtype: list, str
def domain2idna(subject, encoding="utf-8"):
    """
    Process the conversion of the given subject.

    :param subject: The subject to convert.
    :type subject: str, list

    :param str encoding: The encoding to provide.

    :rtype: list, str
    """

    return Converter(subject, original_encoding=encoding).get_converted()
(subject, encoding='utf-8')
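For URLs, only the network location is converted; scheme, path, query and fragment are carried over as-is. A minimal sketch with an illustrative URL:

print(domain2idna("https://bücher.example/path?q=1"))
# expected: https://xn--bcher-kva.example/path?q=1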
724,513
domain2idna
get
This function is a bridge between the front and the backend of this module. :param str domain_to_convert: The domain to convert. :return: str: if a string is given. list: if a list is given. :rtype: str, list .. deprecated:: 1.10.0 Use :func:`~domain2idna.domain2idna` instead.
def get(domain_to_convert):  # pragma: no cover
    """
    This function is a bridge between the front and the backend of this
    module.

    :param str domain_to_convert: The domain to convert.

    :return:
        str: if a string is given.
        list: if a list is given.
    :rtype: str, list

    .. deprecated:: 1.10.0
        Use :func:`~domain2idna.domain2idna` instead.
    """

    warnings.warn(
        "`domain2idna.get` will be removed in future version. "
        "Please use `domain2idna.domain2idna` instead.",
        DeprecationWarning,
    )

    return domain2idna(domain_to_convert)
(domain_to_convert)
724,566
markov_clustering.utils
MessagePrinter
null
class MessagePrinter(object):

    def __init__(self, enabled):
        self._enabled = enabled

    def enable(self):
        self._enabled = True

    def disable(self):
        self._enabled = False

    def print(self, string):
        if self._enabled:
            print(string)
(enabled)
724,567
markov_clustering.utils
__init__
null
def __init__(self, enabled):
    self._enabled = enabled
(self, enabled)
724,568
markov_clustering.utils
disable
null
def disable(self):
    self._enabled = False
(self)
724,569
markov_clustering.utils
enable
null
def enable(self):
    self._enabled = True
(self)
724,570
markov_clustering.utils
print
null
def print(self, string):
    if self._enabled:
        print(string)
(self, string)
724,571
markov_clustering.mcl
add_self_loops
Add self-loops to the matrix by setting the diagonal to loop_value :param matrix: The matrix to add loops to :param loop_value: Value to use for self-loops :returns: The matrix with self-loops
def add_self_loops(matrix, loop_value):
    """
    Add self-loops to the matrix by setting the diagonal to loop_value

    :param matrix: The matrix to add loops to
    :param loop_value: Value to use for self-loops
    :returns: The matrix with self-loops
    """
    shape = matrix.shape
    assert shape[0] == shape[1], "Error, matrix is not square"

    if isspmatrix(matrix):
        new_matrix = matrix.todok()
    else:
        new_matrix = matrix.copy()

    for i in range(shape[0]):
        new_matrix[i, i] = loop_value

    if isspmatrix(matrix):
        return new_matrix.tocsc()

    return new_matrix
(matrix, loop_value)
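A small worked example on a dense matrix (a sparse input takes the same path via a DOK round-trip); numpy is assumed to be available:

import numpy as np

m = np.array([[0.0, 1.0],
              [1.0, 0.0]])
print(add_self_loops(m, loop_value=1))
# [[1. 1.]
#  [1. 1.]]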
724,572
markov_clustering.mcl
converged
Check for convergence by determining if matrix1 and matrix2 are approximately equal. :param matrix1: The matrix to compare with matrix2 :param matrix2: The matrix to compare with matrix1 :returns: True if matrix1 and matrix2 approximately equal
def converged(matrix1, matrix2):
    """
    Check for convergence by determining if
    matrix1 and matrix2 are approximately equal.

    :param matrix1: The matrix to compare with matrix2
    :param matrix2: The matrix to compare with matrix1
    :returns: True if matrix1 and matrix2 approximately equal
    """
    if isspmatrix(matrix1) or isspmatrix(matrix2):
        return sparse_allclose(matrix1, matrix2)

    return np.allclose(matrix1, matrix2)
(matrix1, matrix2)
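A minimal sketch of the dense branch (assuming converged from the module above is in scope): np.allclose uses rtol=1e-05 and atol=1e-08 by default, so tiny perturbations still count as converged:

import numpy as np

a = np.eye(2)
print(converged(a, a + 1e-9))  # True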
724,573
markov_clustering.modularity
convert_to_adjacency_matrix
Converts transition matrix into adjacency matrix :param matrix: The matrix to be converted :returns: adjacency matrix
def convert_to_adjacency_matrix(matrix):
    """
    Converts transition matrix into adjacency matrix

    :param matrix: The matrix to be converted
    :returns: adjacency matrix
    """
    for i in range(matrix.shape[0]):
        if isspmatrix(matrix):
            col = find(matrix[:, i])[2]
        else:
            col = matrix[:, i].T.tolist()[0]

        coeff = max(Fraction(c).limit_denominator().denominator for c in col)
        matrix[:, i] *= coeff

    return matrix
(matrix)
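To see what the Fraction trick does: each column is rescaled by the largest limit_denominator() denominator among its entries, which undoes the column normalization for typical transition matrices. A hedged worked example; note that the dense branch indexes like a numpy matrix, and the function mutates its argument in place:

import numpy as np

t = np.matrix([[0.5, 1/3],
               [0.5, 2/3]])   # column-stochastic transition matrix
print(convert_to_adjacency_matrix(t))
# [[1. 1.]
#  [1. 2.]]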
724,574
scipy.sparse._csc
csc_matrix
Compressed Sparse Column matrix.

This can be instantiated in several ways:

    csc_matrix(D)
        where D is a 2-D ndarray

    csc_matrix(S)
        with another sparse array or matrix S (equivalent to S.tocsc())

    csc_matrix((M, N), [dtype])
        to construct an empty matrix with shape (M, N)
        dtype is optional, defaulting to dtype='d'.

    csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
        where ``data``, ``row_ind`` and ``col_ind`` satisfy the
        relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

    csc_matrix((data, indices, indptr), [shape=(M, N)])
        is the standard CSC representation where the row indices for
        column i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
        corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
        If the shape parameter is not supplied, the matrix dimensions are
        inferred from the index arrays.

Attributes
----------
dtype : dtype
    Data type of the matrix
shape : 2-tuple
    Shape of the matrix
ndim : int
    Number of dimensions (this is always 2)
nnz
size
data
    CSC format data array of the matrix
indices
    CSC format index array of the matrix
indptr
    CSC format index pointer array of the matrix
has_sorted_indices
has_canonical_format
T

Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.

Advantages of the CSC format
    - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
    - efficient column slicing
    - fast matrix vector products (CSR, BSR may be faster)

Disadvantages of the CSC format
    - slow row slicing operations (consider CSR)
    - changes to the sparsity structure are expensive (consider LIL or DOK)

Canonical format
    - Within each column, indices are sorted by row.
    - There are no duplicate entries.

Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> csc_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
       [0, 0, 0, 0],
       [0, 0, 0, 0]], dtype=int8)

>>> row = np.array([0, 2, 2, 0, 1, 2])
>>> col = np.array([0, 0, 1, 2, 2, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 4],
       [0, 0, 5],
       [2, 3, 6]])

>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 4],
       [0, 0, 5],
       [2, 3, 6]])
class csc_matrix(spmatrix, _csc_base):
    """
    Compressed Sparse Column matrix.

    This can be instantiated in several ways:

        csc_matrix(D)
            where D is a 2-D ndarray

        csc_matrix(S)
            with another sparse array or matrix S (equivalent to S.tocsc())

        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indptr[i+1]]`` and
            their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is not
            supplied, the matrix dimensions are inferred from the index
            arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        CSC format data array of the matrix
    indices
        CSC format index array of the matrix
    indptr
        CSC format index pointer array of the matrix
    has_sorted_indices
    has_canonical_format
    T

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSC format
        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
        - efficient column slicing
        - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
        - slow row slicing operations (consider CSR)
        - changes to the sparsity structure are expensive (consider LIL or DOK)

    Canonical format
        - Within each column, indices are sorted by row.
        - There are no duplicate entries.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> csc_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 2, 2, 0, 1, 2])
    >>> col = np.array([0, 0, 1, 2, 2, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])
    """
(arg1, shape=None, dtype=None, copy=False)
724,575
scipy.sparse._data
__abs__
null
def __abs__(self):
    return self._with_data(abs(self._deduped_data()))
(self)
724,576
scipy.sparse._base
__add__
null
def __add__(self, other):  # self + other
    if isscalarlike(other):
        if other == 0:
            return self.copy()
        # Now we would add this scalar to every element.
        raise NotImplementedError('adding a nonzero scalar to a '
                                  'sparse array is not supported')
    elif issparse(other):
        if other.shape != self.shape:
            raise ValueError("inconsistent shapes")
        return self._add_sparse(other)
    elif isdense(other):
        other = np.broadcast_to(other, self.shape)
        return self._add_dense(other)
    else:
        return NotImplemented
(self, other)
724,577
scipy.sparse._base
__bool__
null
def __bool__(self):  # Simple -- other ideas?
    if self.shape == (1, 1):
        return self.nnz != 0
    else:
        raise ValueError("The truth value of an array with more than one "
                         "element is ambiguous. Use a.any() or a.all().")
(self)
724,578
scipy.sparse._base
__div__
null
def __div__(self, other):
    # Always do true division
    return self._divide(other, true_divide=True)
(self, other)
724,579
scipy.sparse._compressed
__eq__
null
def __eq__(self, other):
    # Scalar other.
    if isscalarlike(other):
        if np.isnan(other):
            return self.__class__(self.shape, dtype=np.bool_)

        if other == 0:
            warn("Comparing a sparse matrix with 0 using == is inefficient"
                 ", try using != instead.", SparseEfficiencyWarning,
                 stacklevel=3)
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            inv = self._scalar_binopt(other, operator.ne)
            return all_true - inv
        else:
            return self._scalar_binopt(other, operator.eq)
    # Dense other.
    elif isdense(other):
        return self.todense() == other
    # Pydata sparse other.
    elif is_pydata_spmatrix(other):
        return NotImplemented
    # Sparse other.
    elif issparse(other):
        warn("Comparing sparse matrices using == is inefficient, try using"
             " != instead.", SparseEfficiencyWarning, stacklevel=3)
        # TODO sparse broadcasting
        if self.shape != other.shape:
            return False
        elif self.format != other.format:
            other = other.asformat(self.format)
        res = self._binopt(other, '_ne_')
        all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
        return all_true - res
    else:
        return NotImplemented
(self, other)
724,580
scipy.sparse._compressed
__ge__
null
def __ge__(self, other):
    return self._inequality(other, operator.ge, '_ge_',
                            "Comparing a sparse matrix with a scalar "
                            "less than zero using >= is inefficient, "
                            "try using < instead.")
(self, other)
724,581
scipy.sparse._index
__getitem__
null
def __getitem__(self, key):
    row, col = self._validate_indices(key)

    # Dispatch to specialized methods.
    if isinstance(row, INT_TYPES):
        if isinstance(col, INT_TYPES):
            return self._get_intXint(row, col)
        elif isinstance(col, slice):
            self._raise_on_1d_array_slice()
            return self._get_intXslice(row, col)
        elif col.ndim == 1:
            self._raise_on_1d_array_slice()
            return self._get_intXarray(row, col)
        elif col.ndim == 2:
            return self._get_intXarray(row, col)
        raise IndexError('index results in >2 dimensions')
    elif isinstance(row, slice):
        if isinstance(col, INT_TYPES):
            self._raise_on_1d_array_slice()
            return self._get_sliceXint(row, col)
        elif isinstance(col, slice):
            if row == slice(None) and row == col:
                return self.copy()
            return self._get_sliceXslice(row, col)
        elif col.ndim == 1:
            return self._get_sliceXarray(row, col)
        raise IndexError('index results in >2 dimensions')
    elif row.ndim == 1:
        if isinstance(col, INT_TYPES):
            self._raise_on_1d_array_slice()
            return self._get_arrayXint(row, col)
        elif isinstance(col, slice):
            return self._get_arrayXslice(row, col)
    else:  # row.ndim == 2
        if isinstance(col, INT_TYPES):
            return self._get_arrayXint(row, col)
        elif isinstance(col, slice):
            raise IndexError('index results in >2 dimensions')
        elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
            # special case for outer indexing
            return self._get_columnXarray(row[:, 0], col.ravel())

    # The only remaining case is inner (fancy) indexing
    row, col = _broadcast_arrays(row, col)
    if row.shape != col.shape:
        raise IndexError('number of row and column indices differ')
    if row.size == 0:
        return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype)
    return self._get_arrayXarray(row, col)
(self, key)
724,582
scipy.sparse._compressed
__gt__
null
def __gt__(self, other):
    return self._inequality(other, operator.gt, '_gt_',
                            "Comparing a sparse matrix with a scalar "
                            "less than zero using > is inefficient, "
                            "try using <= instead.")
(self, other)
724,583
scipy.sparse._base
__iadd__
null
def __iadd__(self, other):
    return NotImplemented
(self, other)
724,584
scipy.sparse._base
__idiv__
null
def __idiv__(self, other):
    return self.__itruediv__(other)
(self, other)
724,585
scipy.sparse._data
__imul__
null
def __imul__(self, other):  # self *= other
    if isscalarlike(other):
        self.data *= other
        return self
    else:
        return NotImplemented
(self, other)
724,586
scipy.sparse._compressed
__init__
null
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    _data_matrix.__init__(self)

    if issparse(arg1):
        if arg1.format == self.format and copy:
            arg1 = arg1.copy()
        else:
            arg1 = arg1.asformat(self.format)
        self.indptr, self.indices, self.data, self._shape = (
            arg1.indptr, arg1.indices, arg1.data, arg1._shape
        )
    elif isinstance(arg1, tuple):
        if isshape(arg1):
            # It's a tuple of matrix dimensions (M, N)
            # create empty matrix
            self._shape = check_shape(arg1)
            M, N = self.shape
            # Select index dtype large enough to pass array and
            # scalar parameters to sparsetools
            idx_dtype = self._get_index_dtype(maxval=max(M, N))
            self.data = np.zeros(0, getdtype(dtype, default=float))
            self.indices = np.zeros(0, idx_dtype)
            self.indptr = np.zeros(self._swap((M, N))[0] + 1, dtype=idx_dtype)
        else:
            if len(arg1) == 2:
                # (data, ij) format
                coo = self._coo_container(arg1, shape=shape, dtype=dtype)
                arrays = coo._coo_to_compressed(self._swap)
                self.indptr, self.indices, self.data, self._shape = arrays
            elif len(arg1) == 3:
                # (data, indices, indptr) format
                (data, indices, indptr) = arg1

                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                maxval = None
                if shape is not None:
                    maxval = max(shape)
                idx_dtype = self._get_index_dtype((indices, indptr),
                                                  maxval=maxval,
                                                  check_contents=True)
                if not copy:
                    copy = copy_if_needed
                self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
                self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                self.data = np.array(data, copy=copy, dtype=dtype)
            else:
                raise ValueError(f"unrecognized {self.format}_matrix "
                                 "constructor usage")
    else:
        # must be dense
        try:
            arg1 = np.asarray(arg1)
        except Exception as e:
            msg = f"unrecognized {self.format}_matrix constructor usage"
            raise ValueError(msg) from e
        coo = self._coo_container(arg1, dtype=dtype)
        arrays = coo._coo_to_compressed(self._swap)
        self.indptr, self.indices, self.data, self._shape = arrays

    # Read matrix dimensions given, if any
    if shape is not None:
        self._shape = check_shape(shape)
    else:
        if self.shape is None:
            # shape not already set, try to infer dimensions
            try:
                major_dim = len(self.indptr) - 1
                minor_dim = self.indices.max() + 1
            except Exception as e:
                raise ValueError('unable to infer matrix dimensions') from e
            else:
                self._shape = check_shape(self._swap((major_dim, minor_dim)))

    if dtype is not None:
        self.data = self.data.astype(dtype, copy=False)

    self.check_format(full_check=False)
(self, arg1, shape=None, dtype=None, copy=False)
724,587
scipy.sparse._base
__isub__
null
def __isub__(self, other):
    return NotImplemented
(self, other)
724,588
scipy.sparse._csc
__iter__
null
def __iter__(self):
    yield from self.tocsr()
(self)
724,589
scipy.sparse._data
__itruediv__
null
def __itruediv__(self, other):  # self /= other
    if isscalarlike(other):
        recip = 1.0 / other
        self.data *= recip
        return self
    else:
        return NotImplemented
(self, other)
724,590
scipy.sparse._compressed
__le__
null
def __le__(self, other):
    return self._inequality(other, operator.le, '_le_',
                            "Comparing a sparse matrix with a scalar "
                            "greater than zero using <= is inefficient, "
                            "try using > instead.")
(self, other)
724,591
scipy.sparse._base
__len__
null
def __len__(self):
    raise TypeError("sparse array length is ambiguous; use getnnz()"
                    " or shape[0]")
(self)
724,592
scipy.sparse._compressed
__lt__
null
def __lt__(self, other):
    return self._inequality(other, operator.lt, '_lt_',
                            "Comparing a sparse matrix with a scalar "
                            "greater than zero using < is inefficient, "
                            "try using >= instead.")
(self, other)