Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
contract_address = self.get_asset_address(asset)
invoke_code = build_native_invoke_code(contract_address, b'\x00', 'decimals', bytearray())
tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
try:
    decimal = ContractDataParser.to_int(response['Result'])
    return decimal
except SDKException:
    return 0
def query_decimals(self, asset: str) -> int
This interface is used to query the asset's decimals of ONT or ONG. :param asset: a string which is used to indicate which asset's decimals we want to get :return: asset's decimals in the form of int
7.052012
7.149536
0.986359
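A hedged usage sketch for query_decimals; the entry points below (OntologySdk, rpc.set_address, native_vm.asset()) are assumptions about the surrounding ontology-python-sdk API and may differ across versions:

# Hypothetical usage; import path and attribute names are assumptions.
from ontology.ont_sdk import OntologySdk

sdk = OntologySdk()
sdk.rpc.set_address('http://polaris3.ont.io:20336')  # placeholder test node
asset = sdk.native_vm.asset()
print(asset.query_decimals('ont'))  # ONT is indivisible, so expect 0
print(asset.query_decimals('ong'))  # ONG uses 9 decimals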
if not isinstance(b58_from_address, str) or not isinstance(b58_to_address, str) or \
        not isinstance(b58_payer_address, str):
    raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.'))
if len(b58_from_address) != 34 or len(b58_to_address) != 34 or len(b58_payer_address) != 34:
    raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.'))
if amount <= 0:
    raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
if gas_price < 0:
    raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
if gas_limit < 0:
    raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
contract_address = self.get_asset_address(asset)
raw_from = Address.b58decode(b58_from_address).to_bytes()
raw_to = Address.b58decode(b58_to_address).to_bytes()
raw_payer = Address.b58decode(b58_payer_address).to_bytes()
state = [{"from": raw_from, "to": raw_to, "amount": amount}]
invoke_code = build_native_invoke_code(contract_address, b'\x00', "transfer", state)
return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
def new_transfer_transaction(self, asset: str, b58_from_address: str, b58_to_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction
This interface is used to generate a Transaction object for transfer. :param asset: a string indicating which asset to transfer. :param b58_from_address: a base58 encoded address the asset is transferred from. :param b58_to_address: a base58 encoded address the asset is transferred to. :param amount: the amount of asset to transfer. :param b58_payer_address: a base58 encoded address of the account that pays for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: a Transaction object which can be used for transfer.
2.354112
2.309725
1.019217
if not isinstance(b58_send_address, str) or not isinstance(b58_recv_address, str):
    raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.'))
if len(b58_send_address) != 34 or len(b58_recv_address) != 34:
    raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.'))
if amount <= 0:
    raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
if gas_price < 0:
    raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
if gas_limit < 0:
    raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
contract_address = self.get_asset_address(asset)
raw_send = Address.b58decode(b58_send_address).to_bytes()
raw_recv = Address.b58decode(b58_recv_address).to_bytes()
raw_payer = Address.b58decode(b58_payer_address).to_bytes()
args = {"from": raw_send, "to": raw_recv, "amount": amount}
invoke_code = build_native_invoke_code(contract_address, b'\x00', 'approve', args)
return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
def new_approve_transaction(self, asset: str, b58_send_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction
This interface is used to generate a Transaction object for approve. :param asset: a string indicating which asset to approve. :param b58_send_address: a base58 encoded address the approval is granted from. :param b58_recv_address: a base58 encoded address the approval is granted to. :param amount: the amount of asset to approve. :param b58_payer_address: a base58 encoded address of the account that pays for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: a Transaction object which can be used for approve.
2.463412
2.398915
1.026886
raw_sender = Address.b58decode(b58_send_address).to_bytes()
raw_from = Address.b58decode(b58_from_address).to_bytes()
raw_to = Address.b58decode(b58_recv_address).to_bytes()
raw_payer = Address.b58decode(b58_payer_address).to_bytes()
contract_address = self.get_asset_address(asset)
args = {"sender": raw_sender, "from": raw_from, "to": raw_to, "amount": amount}
invoke_code = build_native_invoke_code(contract_address, b'\x00', "transferFrom", args)
return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
def new_transfer_from_transaction(self, asset: str, b58_send_address: str, b58_from_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction
This interface is used to generate a Transaction object that allows one account to transfer an amount of ONT or ONG to another account, provided the first account has already been approved. :param asset: a string indicating which asset to transfer. :param b58_send_address: a base58 encoded address of the account sending the transaction. :param b58_from_address: a base58 encoded address the asset is transferred from. :param b58_recv_address: a base58 encoded address the asset is transferred to. :param amount: the amount of asset to transfer. :param b58_payer_address: a base58 encoded address of the account that pays for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: a Transaction object that allows one account to transfer an amount of asset to another account.
2.624908
2.779111
0.944514
if not isinstance(b58_claimer_address, str) or not isinstance(b58_recv_address, str) or \
        not isinstance(b58_payer_address, str):
    raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.'))
if len(b58_claimer_address) != 34 or len(b58_recv_address) != 34 or len(b58_payer_address) != 34:
    raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.'))
if amount <= 0:
    raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
if gas_price < 0:
    raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
if gas_limit < 0:
    raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
ont_contract_address = self.get_asset_address('ont')
ong_contract_address = self.get_asset_address("ong")
args = {"sender": Address.b58decode(b58_claimer_address).to_bytes(), "from": ont_contract_address,
        "to": Address.b58decode(b58_recv_address).to_bytes(), "value": amount}
invoke_code = build_native_invoke_code(ong_contract_address, b'\x00', "transferFrom", args)
payer_array = Address.b58decode(b58_payer_address).to_bytes()
return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, payer_array, invoke_code, bytearray(), list())
def new_withdraw_ong_transaction(self, b58_claimer_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction
This interface is used to generate a Transaction object that allows one account to withdraw an amount of ong and transfer it to a receiving address. :param b58_claimer_address: a base58 encoded address indicating who the claimer is. :param b58_recv_address: a base58 encoded address indicating who receives the claimed ong. :param amount: the amount of asset to claim. :param b58_payer_address: a base58 encoded address of the account that pays for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: a Transaction object which can be used to withdraw ong.
2.527525
2.436364
1.037417
tx = self.new_transfer_transaction(asset, from_acct.get_address_base58(), b58_to_address, amount,
                                   payer.get_address_base58(), gas_limit, gas_price)
tx.sign_transaction(from_acct)
if from_acct.get_address_base58() != payer.get_address_base58():
    tx.add_sign_transaction(payer)
return self.__sdk.get_network().send_raw_transaction(tx)
def transfer(self, asset: str, from_acct: Account, b58_to_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int)
This interface is used to send a transfer transaction for ONT or ONG only. :param asset: a string indicating which asset to transfer. :param from_acct: an Account object the asset is transferred from. :param b58_to_address: a base58 encoded address the asset is transferred to. :param amount: the amount of asset to transfer. :param payer: an Account object that pays for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: hexadecimal transaction hash value.
2.57514
2.542916
1.012672
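An end-to-end sketch of the transfer flow just described (build, sign, add the payer's signature only when it differs, broadcast); the private key, node URL, and destination address are placeholders, and the SDK entry points are assumptions inferred from the signatures above:

# Illustrative only; key, node URL, and address are placeholders.
from ontology.ont_sdk import OntologySdk
from ontology.account.account import Account

sdk = OntologySdk()
sdk.rpc.set_address('http://polaris3.ont.io:20336')
from_acct = Account('<hex-encoded-private-key>')
payer = from_acct  # sender pays its own fee, so only one signature is added
tx_hash = sdk.native_vm.asset().transfer(
    'ong', from_acct, '<base58-to-address>', 500, payer, 20000, 500)
print(tx_hash)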
if claimer is None:
    raise SDKException(ErrorCode.param_err('the claimer should not be None.'))
if payer is None:
    raise SDKException(ErrorCode.param_err('the payer should not be None.'))
if amount <= 0:
    raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
if gas_price < 0:
    raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
if gas_limit < 0:
    raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
b58_claimer = claimer.get_address_base58()
b58_payer = payer.get_address_base58()
tx = self.new_withdraw_ong_transaction(b58_claimer, b58_recv_address, amount, b58_payer, gas_limit, gas_price)
tx.sign_transaction(claimer)
if claimer.get_address_base58() != payer.get_address_base58():
    tx.add_sign_transaction(payer)
return self.__sdk.get_network().send_raw_transaction(tx)
def withdraw_ong(self, claimer: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int) -> str
This interface is used to withdraw an amount of ong and transfer it to a receiving address. :param claimer: the owner of the ong that remains to be claimed. :param b58_recv_address: the address that receives the ong. :param amount: the amount of ong to claim. :param payer: an Account object used to pay for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: hexadecimal transaction hash value.
2.189773
2.111279
1.037178
if sender is None:
    raise SDKException(ErrorCode.param_err('the sender should not be None.'))
if payer is None:
    raise SDKException(ErrorCode.param_err('the payer should not be None.'))
if amount <= 0:
    raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
if gas_price < 0:
    raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
if gas_limit < 0:
    raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
b58_sender_address = sender.get_address_base58()
b58_payer_address = payer.get_address_base58()
tx = self.new_approve_transaction(asset, b58_sender_address, b58_recv_address, amount, b58_payer_address,
                                  gas_limit, gas_price)
tx.sign_transaction(sender)
if sender.get_address_base58() != payer.get_address_base58():
    tx.add_sign_transaction(payer)
return self.__sdk.get_network().send_raw_transaction(tx)
def approve(self, asset, sender: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int) -> str
This interface is used to send an approve transaction which allows the receiver to spend an amount of ONT or ONG from the sender's account. :param asset: a string indicating which asset to approve. :param sender: an Account object that sends the approve transaction. :param b58_recv_address: a base58 encoded address the approval is granted to. :param amount: the amount of asset to approve. :param payer: an Account object used to pay for the transaction. :param gas_limit: an int value indicating the gas limit. :param gas_price: an int value indicating the gas price. :return: hexadecimal transaction hash value.
2.228949
2.140023
1.041554
account = self.get_account_by_b58_address(address)
if account is None:
    raise SDKException(ErrorCode.get_account_by_address_err)
self.accounts.remove(account)
def remove_account(self, address: str)
This interface is used to remove an account from WalletData. :param address: a base58 encoded address string.
5.972831
5.707196
1.046544
if index >= len(self.accounts):
    raise SDKException(ErrorCode.param_error)
for acct in self.accounts:
    acct.is_default = False
self.accounts[index].is_default = True
self.default_account_address = self.accounts[index].b58_address
def set_default_account_by_index(self, index: int)
This interface is used to set the default account by a given index. :param index: an int value indicating the position of the account object in the account list.
4.396629
4.380105
1.003772
flag = True
index = -1
for acct in self.accounts:
    index += 1
    if acct.b58_address == b58_address:
        flag = False
        break
if flag:
    raise SDKException(ErrorCode.get_account_by_address_err)
for i in range(len(self.accounts)):
    self.accounts[i].is_default = False
self.accounts[index].is_default = True
self.default_account_address = b58_address
def set_default_account_by_address(self, b58_address: str)
This interface is used to set the default account by a given base58 encoded address. :param b58_address: a base58 encoded address.
2.956906
3.046583
0.970565
identities_len = len(self.identities)
if index >= identities_len:
    raise SDKException(ErrorCode.param_error)
for i in range(identities_len):
    self.identities[i].is_default = False
    if i == index:
        self.identities[index].is_default = True
def set_default_identity_by_index(self, index: int)
This interface is used to set the default identity by a given index value. :param index: an int value indicating the position of an identity object in the identity list.
3.289131
3.20401
1.026567
self._email = email
self._password = password
response = requests.post(self._auth_url, json=dict(
    payload=dict(email=self._email, password=self._password, serviceId=4728)))
body = response.json()
if response.status_code == codes.ok:
    self._glb_id = body['glbId']
else:
    raise CartolaFCError(body['userMessage'])
def set_credentials(self, email, password)
Authenticates against the CartolaFC service using the given email and password. Args: email (str): The user's email. password (str): The user's password. Raises: cartolafc.CartolaFCError: If authentication with the given (email, password) pair did not succeed.
5.020837
3.896981
1.288391
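The login call above is a plain requests POST with a JSON body; a self-contained sketch of the same pattern (treat the URL and payload shape as assumptions copied from the snippet, not a documented contract):

import requests

# Endpoint and payload shape assumed from the snippet above.
auth_url = 'https://login.globo.com/api/authentication'
response = requests.post(auth_url, json={
    'payload': {'email': 'user@example.com', 'password': 'secret', 'serviceId': 4728},
})
body = response.json()
if response.status_code == requests.codes.ok:
    glb_id = body['glbId']  # session token reused on authenticated requests
else:
    raise RuntimeError(body['userMessage'])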
self._redis_url = redis_url
self._redis_timeout = redis_timeout if isinstance(redis_timeout, int) and redis_timeout > 0 else 10
try:
    self._redis = redis.StrictRedis.from_url(url=redis_url)
    self._redis.ping()
except (ConnectionError, TimeoutError):
    raise CartolaFCError('Erro conectando ao servidor Redis.')
def set_redis(self, redis_url, redis_timeout=10)
Connects to the Redis server at the given URL. Args: redis_url (str): URL used to connect to the Redis server, e.g. redis://user:password@localhost:6379/2. redis_timeout (int): The default timeout (in seconds). Raises: cartolafc.CartolaFCError: If the connection to the Redis server cannot be established.
3.149576
2.435571
1.293157
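The Redis handshake is just from_url plus a ping; a runnable sketch with the redis-py API:

import redis

redis_url = 'redis://localhost:6379/0'  # placeholder URL
try:
    client = redis.StrictRedis.from_url(url=redis_url)
    client.ping()  # raises on an unreachable or misconfigured server
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):
    raise RuntimeError('Could not connect to the Redis server.')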
if not any((nome, slug)):
    raise CartolaFCError('Você precisa informar o nome ou o slug da liga que deseja obter')
slug = slug if slug else convert_team_name_to_slug(nome)
url = '{api_url}/auth/liga/{slug}'.format(api_url=self._api_url, slug=slug)
data = self._request(url, params=dict(page=page, orderBy=order_by))
return Liga.from_dict(data, order_by)
def liga(self, nome=None, slug=None, page=1, order_by=CAMPEONATO)
This endpoint requires the API to be authenticated, and searches by the given name or slug. It returns only 20 teams per page, so if your league has more than 20 members, use the "page" argument to fetch more teams. Args: nome (str): Name of the league to fetch. Required if the slug is not given. slug (str): Slug of the league to fetch. *This argument takes priority over the name.* page (int): Page of teams to fetch. order_by (str): Teams can be ordered by "campeonato", "turno", "mes", "rodada" and "patrimonio". The constants are available as "cartolafc.CAMPEONATO", "cartolafc.TURNO" and so on. Returns: An object representing the league found. Raises: CartolaFCError: If the API is not authenticated, or if no league was found for the given data.
3.938852
3.205547
1.228761
url = '{api_url}/ligas'.format(api_url=self._api_url)
data = self._request(url, params=dict(q=query))
return [Liga.from_dict(liga_info) for liga_info in data]
def ligas(self, query)
Returns the result of a Cartola search for the given search term. Args: query (str): Term to use in the search. Returns: A list of cartolafc.Liga instances, one for each league containing the search term.
3.354972
3.55032
0.944977
url = '{api_url}/mercado/status'.format(api_url=self._api_url)
data = self._request(url)
return Mercado.from_dict(data)
def mercado(self)
Fetches the market status for the current round. Returns: A cartolafc.Mercado instance representing the market status in the current round.
4.655105
3.846346
1.210267
if self.mercado().status.id == MERCADO_FECHADO:
    url = '{api_url}/atletas/pontuados'.format(api_url=self._api_url)
    data = self._request(url)
    clubes = {clube['id']: Clube.from_dict(clube) for clube in data['clubes'].values()}
    return {int(atleta_id): Atleta.from_dict(atleta, clubes=clubes, atleta_id=int(atleta_id))
            for atleta_id, atleta in data['atletas'].items() if atleta['clube_id'] > 0}
raise CartolaFCError('As pontuações parciais só ficam disponíveis com o mercado fechado.')
def parciais(self)
Fetches a map of all athletes that have already scored in the current (open) round. Returns: A map where the key is an int representing the athlete id and the value is a cartolafc.Atleta instance. Raises: CartolaFCError: If the current market is not closed (partial scores are only available while the market is closed).
4.59139
3.303871
1.3897
if not any((id, nome, slug)):
    raise CartolaFCError('Você precisa informar o nome ou o slug do time que deseja obter')
param = 'id' if id else 'slug'
value = id if id else (slug if slug else convert_team_name_to_slug(nome))
url = '{api_url}/time/{param}/{value}'.format(api_url=self._api_url, param=param, value=value)
data = self._request(url)
if bool(as_json):
    return data
clubes = {clube['id']: Clube.from_dict(clube) for clube in data['clubes'].values()}
return Time.from_dict(data, clubes=clubes, capitao=data['capitao_id'])
def time(self, id=None, nome=None, slug=None, as_json=False)
Fetches a specific team by id, name, or slug. At least one of them must be given. Args: id (int): Id of the team to fetch. *This argument is always used first.* nome (str): Name of the team to fetch. Required if the slug is not given. slug (str): Slug of the team to fetch. *This argument takes priority over the name.* as_json (bool): Whether to return the raw json response instead. Returns: A cartolafc.Time instance if the team was found. Raises: cartolafc.CartolaFCError: If an error occurred, e.g. no team was found.
3.469351
3.140211
1.104815
url = '{api_url}/times'.format(api_url=self._api_url)
data = self._request(url, params=dict(q=query))
return [TimeInfo.from_dict(time_info) for time_info in data]
def times(self, query)
Returns the result of a Cartola search for the given search term. Args: query (str): Term to use in the search. Returns: A list of cartolafc.TimeInfo instances, one for each team containing the search term.
3.365404
3.537016
0.951481
if features is not None:
    features = set(features)

def parse_frame(s):
    if s == ".":
        return 0
    else:
        return int(s)

# GTF columns:
# 1) seqname: str ("1", "X", "chrX", etc...)
# 2) source : str
#    Different versions of GTF use the second column as:
#    (a) gene biotype
#    (b) transcript biotype
#    (c) the annotation source
#    See: https://www.biostars.org/p/120306/#120321
# 3) feature : str ("gene", "transcript", &c)
# 4) start : int
# 5) end : int
# 6) score : float or "."
# 7) strand : "+", "-", or "."
# 8) frame : 0, 1, 2 or "."
# 9) attribute : key-value pairs separated by semicolons
# (see more complete description in docstring at top of file)
chunk_iterator = pd.read_csv(
    filepath_or_buffer,
    sep="\t",
    comment="#",
    names=REQUIRED_COLUMNS,
    skipinitialspace=True,
    skip_blank_lines=True,
    error_bad_lines=True,
    warn_bad_lines=True,
    chunksize=chunksize,
    engine="c",
    dtype={
        "start": np.int64,
        "end": np.int64,
        "score": np.float32,
        "seqname": str,
    },
    na_values=".",
    converters={"frame": parse_frame})
dataframes = []
try:
    for df in chunk_iterator:
        for intern_column in intern_columns:
            df[intern_column] = [intern(str(s)) for s in df[intern_column]]
        # compare feature strings after interning
        if features is not None:
            df = df[df["feature"].isin(features)]
        for fix_quotes_column in fix_quotes_columns:
            # Catch mistaken semicolons by replacing "xyz;" with "xyz"
            # Required to do this since the Ensembl GTF for Ensembl
            # release 78 has mistakes such as:
            #   gene_name = "PRAMEF6;" transcript_name = "PRAMEF6;-201"
            df[fix_quotes_column] = [
                s.replace(';\"', '\"').replace(";-", "-")
                for s in df[fix_quotes_column]
            ]
        dataframes.append(df)
except Exception as e:
    raise ParsingError(str(e))
df = pd.concat(dataframes)
return df
def parse_gtf( filepath_or_buffer, chunksize=1024 * 1024, features=None, intern_columns=["seqname", "source", "strand", "frame"], fix_quotes_columns=["attribute"])
Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int features : set or None Drop entries which aren't one of these features intern_columns : list These columns are short strings which should be interned fix_quotes_columns : list Most commonly the 'attribute' column which had broken quotes on some Ensembl release GTF files.
3.412522
3.396935
1.004589
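A small, self-contained illustration of the chunked pandas pattern parse_gtf is built on, run against an in-memory two-line GTF (the column names are inlined here because REQUIRED_COLUMNS is defined elsewhere in the module):

import io
import pandas as pd

REQUIRED_COLUMNS = ["seqname", "source", "feature", "start", "end",
                    "score", "strand", "frame", "attribute"]
gtf_text = ('1\thavana\tgene\t11869\t14409\t.\t+\t.\tgene_id "ENSG0001";\n'
            '1\thavana\texon\t11869\t12227\t.\t+\t.\tgene_id "ENSG0001";\n')

# Read in chunks, filter each chunk, then concatenate -- the same shape
# as the loop in parse_gtf above.
chunks = pd.read_csv(io.StringIO(gtf_text), sep="\t", comment="#",
                     names=REQUIRED_COLUMNS, chunksize=1, na_values=".")
df = pd.concat(chunk[chunk["feature"] == "gene"] for chunk in chunks)
print(df)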
result = parse_gtf(filepath_or_buffer, chunksize=chunksize, features=features)
attribute_values = result["attribute"]
del result["attribute"]
for column_name, values in expand_attribute_strings(
        attribute_values, usecols=restrict_attribute_columns).items():
    result[column_name] = values
return result
def parse_gtf_and_expand_attributes( filepath_or_buffer, chunksize=1024 * 1024, restrict_attribute_columns=None, features=None)
Parse lines into a column->values dictionary and then expand the 'attribute' column into multiple columns. This expansion happens by replacing strings of semicolon-separated key-value pairs in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where the key didn't occur). Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int restrict_attribute_columns : list/set of str or None If given, then only expand these attribute columns. features : set or None Ignore entries which don't correspond to one of the supplied features
3.00403
3.517185
0.854101
if isinstance(filepath_or_buffer, string_types) and not exists(filepath_or_buffer):
    raise ValueError("GTF file does not exist: %s" % filepath_or_buffer)

if expand_attribute_column:
    result_df = parse_gtf_and_expand_attributes(
        filepath_or_buffer,
        chunksize=chunksize,
        restrict_attribute_columns=usecols)
else:
    result_df = parse_gtf(filepath_or_buffer, chunksize=chunksize, features=features)

for column_name, column_type in list(column_converters.items()):
    result_df[column_name] = [
        column_type(string_value) if len(string_value) > 0 else None
        for string_value in result_df[column_name]
    ]

# Hackishly infer whether the values in the 'source' column of this GTF
# are actually representing a biotype by checking for the most common
# gene_biotype and transcript_biotype value 'protein_coding'
if infer_biotype_column:
    unique_source_values = set(result_df["source"])
    if "protein_coding" in unique_source_values:
        column_names = set(result_df.columns)
        # Disambiguate between the two biotypes by checking if
        # gene_biotype is already present in another column. If it is,
        # the 2nd column is the transcript_biotype (otherwise, it's the
        # gene_biotype)
        if "gene_biotype" not in column_names:
            logging.info("Using column 'source' to replace missing 'gene_biotype'")
            result_df["gene_biotype"] = result_df["source"]
        if "transcript_biotype" not in column_names:
            logging.info("Using column 'source' to replace missing 'transcript_biotype'")
            result_df["transcript_biotype"] = result_df["source"]

if usecols is not None:
    column_names = set(result_df.columns)
    valid_columns = [c for c in usecols if c in column_names]
    result_df = result_df[valid_columns]

return result_df
def read_gtf( filepath_or_buffer, expand_attribute_column=True, infer_biotype_column=False, column_converters={}, usecols=None, features=None, chunksize=1024 * 1024)
Parse a GTF file into a dataframe. Parameters ---------- filepath_or_buffer : str or buffer object Path to GTF file (may be gzip compressed) or buffer object such as StringIO expand_attribute_column : bool Replace strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). infer_biotype_column : bool Due to the annoying ambiguity of the second GTF column across multiple Ensembl releases, figure out if an older GTF's source column is actually the gene_biotype or transcript_biotype. column_converters : dict, optional Dictionary mapping column names to conversion functions. Will replace empty strings with None and otherwise passes them to given conversion function. usecols : list of str or None Restrict which columns are loaded to the given set. If None, then load all columns. features : set of str or None Drop rows which aren't one of the features in the supplied set chunksize : int
2.660694
2.63146
1.011109
n = len(attribute_strings)
extra_columns = {}
column_order = []

#
# SOME NOTES ABOUT THE BIZARRE STRING INTERNING GOING ON BELOW
#
# While parsing millions of repeated strings (e.g. "gene_id" and "TP53"),
# we can save a lot of memory by making sure there's only one string
# object per unique string. The canonical way to do this is using
# the 'intern' function. One problem is that Py2 won't let you intern
# unicode objects, so to get around this we call intern(str(...)).
#
# It also turns out to be faster to check interned strings ourselves
# using a local dictionary, hence the two dictionaries below
# and pair of try/except blocks in the loop.
column_interned_strings = {}
value_interned_strings = {}

for (i, attribute_string) in enumerate(attribute_strings):
    for kv in attribute_string.split(";"):
        # We're slicing the first two elements out of split() because
        # Ensembl release 79 added values like:
        #   transcript_support_level "1 (assigned to previous version 5)";
        # ...which gets mangled by splitting on spaces.
        parts = kv.strip().split(" ", 2)[:2]
        if len(parts) != 2:
            continue
        column_name, value = parts
        try:
            column_name = column_interned_strings[column_name]
        except KeyError:
            column_name = intern(str(column_name))
            column_interned_strings[column_name] = column_name
        if usecols is not None and column_name not in usecols:
            continue
        try:
            column = extra_columns[column_name]
        except KeyError:
            column = [missing_value] * n
            extra_columns[column_name] = column
            column_order.append(column_name)
        value = value.replace(quote_char, "") if value.startswith(quote_char) else value
        try:
            value = value_interned_strings[value]
        except KeyError:
            value = intern(str(value))
            value_interned_strings[value] = value
        # if an attribute is used repeatedly then
        # keep track of all its values in a list
        old_value = column[i]
        if old_value is missing_value:
            column[i] = value
        else:
            column[i] = "%s,%s" % (old_value, value)

logging.info("Extracted GTF attributes: %s" % column_order)
return OrderedDict(
    (column_name, extra_columns[column_name])
    for column_name in column_order)
def expand_attribute_strings( attribute_strings, quote_char='\"', missing_value="", usecols=None)
The last column of GTF has a variable number of key value pairs of the format: "key1 value1; key2 value2;" Parse these into a dictionary mapping each key onto a list of values, where the value is None for any row where the key was missing. Parameters ---------- attribute_strings : list of str quote_char : str Quote character to remove from values missing_value : any If an attribute is missing from a row, give it this value. usecols : list of str or None If not None, then only expand columns included in this set, otherwise use all columns. Returns OrderedDict of column->value list mappings, in the order they appeared in the attribute strings.
4.284105
4.204438
1.018948
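The attribute expansion reduces to splitting each 'key value;' pair and collecting one column per key; a minimal sketch of the same idea without the interning optimizations:

from collections import OrderedDict

def expand_attributes_simple(attribute_strings, missing_value=""):
    # One list per attribute key, aligned with the input rows.
    n = len(attribute_strings)
    columns = OrderedDict()
    for i, attr in enumerate(attribute_strings):
        for kv in attr.split(";"):
            parts = kv.strip().split(" ", 2)[:2]
            if len(parts) != 2:
                continue
            key, value = parts
            column = columns.setdefault(key, [missing_value] * n)
            column[i] = value.strip('"')
    return columns

print(expand_attributes_simple(['gene_id "TP53"; gene_name "TP53";']))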
extra_dataframes = []

existing_features = set(dataframe["feature"])
existing_columns = set(dataframe.keys())

for (feature_name, groupby_key) in unique_keys.items():
    if feature_name in existing_features:
        logging.info("Feature '%s' already exists in GTF data" % feature_name)
        continue

    logging.info("Creating rows for missing feature '%s'" % feature_name)

    # don't include rows where the groupby key was missing
    empty_key_values = dataframe[groupby_key].map(lambda x: x == "" or x is None)
    row_groups = dataframe[~empty_key_values].groupby(groupby_key)

    # Each group corresponds to a unique feature entry for which the
    # other columns may or may not be uniquely defined. Start off by
    # assuming the values for every column are missing and fill them in
    # where possible.
    feature_values = OrderedDict([
        (column_name, [missing_value] * row_groups.ngroups)
        for column_name in dataframe.keys()
    ])

    # User specifies which non-required columns should we try to infer
    # values for
    feature_columns = list(extra_columns.get(feature_name, []))

    for i, (feature_id, group) in enumerate(row_groups):
        # fill in the required columns by assuming that this feature
        # is the union of all intervals of other features that were
        # tagged with its unique ID (e.g. union of exons which had a
        # particular gene_id).
        feature_values["feature"][i] = feature_name
        feature_values[groupby_key][i] = feature_id

        # set the source to 'gtfparse' to indicate that we made this
        # entry up from other data
        feature_values["source"][i] = "gtfparse"
        feature_values["start"][i] = group["start"].min()
        feature_values["end"][i] = group["end"].max()

        # assume that seqname and strand are the same for all other
        # entries in the GTF which shared this unique ID
        feature_values["seqname"][i] = group["seqname"].iat[0]
        feature_values["strand"][i] = group["strand"].iat[0]

        # there's probably no rigorous way to set the values of
        # 'score' or 'frame' columns so leave them empty

        for column_name in feature_columns:
            if column_name not in existing_columns:
                raise ValueError(
                    "Column '%s' does not exist in GTF, columns = %s" % (
                        column_name, existing_columns))
            # expect that all entries related to a reconstructed feature
            # are related and are thus within the same interval of
            # positions on the same chromosome
            unique_values = group[column_name].dropna().unique()
            if len(unique_values) == 1:
                feature_values[column_name][i] = unique_values[0]

    extra_dataframes.append(pd.DataFrame(feature_values))

return pd.concat([dataframe] + extra_dataframes, ignore_index=True)
def create_missing_features( dataframe, unique_keys={}, extra_columns={}, missing_value=None)
Helper function used to construct a missing feature such as 'transcript' or 'gene'. Some GTF files only have 'exon' and 'CDS' entries, but have transcript_id and gene_id annotations which allow us to construct those missing features. Parameters ---------- dataframe : pandas.DataFrame Should contain at least the core GTF columns, such as "seqname", "start", and "end" unique_keys : dict Mapping from feature names to the name of the column which should act as a unique key for that feature. Example: {"gene": "gene_id"} extra_columns : dict By default the constructed feature row will include only the 8 core columns and its unique key. Any other columns that should be included should be associated with the feature name in this dict. missing_value : any Which value to fill in for columns that we don't infer values for. Returns original dataframe along with all extra rows created for missing features.
4.078588
3.838471
1.062555
index_local_timestamp = self.get_index_local_timestamp(key)
real_local_timestamp = self.get_real_local_timestamp(key)
remote_timestamp = self.get_remote_timestamp(key)
return get_sync_state(index_local_timestamp, real_local_timestamp, remote_timestamp)
def get_action(self, key)
Returns the action to perform on this key, based on its state before the last sync.
3.394085
3.088263
1.099027
logger.debug("Locking %s", self.lock_file) if not os.path.exists(self.lock_file): self.ensure_path(self.lock_file) with open(self.lock_file, "w"): os.utime(self.lock_file) self._lock.acquire(timeout=timeout)
def lock(self, timeout=10)
Advisory lock. Use to ensure that only one LocalSyncClient is working on the Target at the same time.
2.552202
2.508473
1.017433
logger.debug("Releasing lock %s", self.lock_file) self._lock.release() try: os.unlink(self.lock_file) except FileNotFoundError: pass
def unlock(self)
Unlock the active advisory lock.
3.494143
3.605263
0.969178
while True:
    if secret:
        value = getpass.getpass(*args, **kwargs)
    else:
        value = input(*args, **kwargs)
    if blank:
        value = value if value else None
    if not required or value:
        break
return value
def get_input(*args, secret=False, required=False, blank=False, **kwargs)
secret: Don't show user input while it is being typed. required: Keep prompting if the user enters an empty value. blank: Turn all empty strings into None.
2.481326
2.239957
1.107756
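The prompting loop is short enough to restate standalone; this sketch reproduces the same secret/required/blank behaviour:

import getpass

def prompt(message, secret=False, required=False, blank=False):
    # Same control flow as get_input above: loop until the value is
    # acceptable, hide typing when secret, and map '' to None when blank.
    while True:
        value = getpass.getpass(message) if secret else input(message)
        if blank:
            value = value or None
        if not required or value:
            return value

# token = prompt('API token: ', secret=True, required=True)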
@wraps(method)
def wrapper(self, *args, **kwargs):
    notproxied = _oga(self, "__notproxied__")
    _osa(self, "__notproxied__", True)
    try:
        return method(self, *args, **kwargs)
    finally:
        _osa(self, "__notproxied__", notproxied)
return wrapper
def _no_proxy(method)
Returns a wrapped version of `method`, such that proxying is turned off during the method call.
3.190476
3.220678
0.990623
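The wrapper saves the current flag, forces it on for the duration of the call, and restores it in a finally block; the same pattern in a standalone sketch (the Demo class and flag name are hypothetical stand-ins for the proxy machinery):

from functools import wraps

def suspend_flag(method):
    # Mirrors the save/set/restore try/finally structure of _no_proxy above.
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        saved = self.flag
        self.flag = True
        try:
            return method(self, *args, **kwargs)
        finally:
            self.flag = saved
    return wrapper

class Demo:
    flag = False

    @suspend_flag
    def inside(self):
        return self.flag

d = Demo()
print(d.inside(), d.flag)  # True False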
if attr in type(self).__notproxied__:
    return False
if _oga(self, "__notproxied__") is True:
    return False
return True
def _should_proxy(self, attr)
Determines whether `attr` should be looked up on the proxied object, or the proxy itself.
9.042567
8.262412
1.094422
@wraps(func)
def proxied(self, *args, **kwargs):
    args = list(args)
    args.insert(arg_pos, self.__subject__)
    result = func(*args, **kwargs)
    return result
setattr(cls, name, proxied)
def add_proxy_meth(cls, name, func, arg_pos=0)
Add a method `name` to the class, which returns the value of `func`, called with the proxied value inserted at `arg_pos`
2.561454
2.51135
1.019951
if loader is None:
    loader = functools.partial(jsonloader, **kwargs)

return JsonRef.replace_refs(
    json.load(fp, **kwargs),
    base_uri=base_uri,
    loader=loader,
    jsonschema=jsonschema,
    load_on_repr=load_on_repr,
)
def load(fp, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs)
Drop in replacement for :func:`json.load`, where JSON references are proxied to their referent data. :param fp: File-like object containing JSON document :param kwargs: This function takes any of the keyword arguments from :meth:`JsonRef.replace_refs`. Any other keyword arguments will be passed to :func:`json.load`
2.262257
2.198574
1.028966
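A quick usage sketch with the jsonref package this function belongs to, loading from an in-memory file object:

import io
import jsonref

fp = io.StringIO('{"answer": 42, "alias": {"$ref": "#/answer"}}')
doc = jsonref.load(fp)
print(doc['alias'])  # 42, resolved lazily through a JsonRef proxy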
if loader is None:
    loader = functools.partial(jsonloader, **kwargs)

return JsonRef.replace_refs(
    json.loads(s, **kwargs),
    base_uri=base_uri,
    loader=loader,
    jsonschema=jsonschema,
    load_on_repr=load_on_repr,
)
def loads(s, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs)
Drop in replacement for :func:`json.loads`, where JSON references are proxied to their referent data. :param s: String containing JSON document :param kwargs: This function takes any of the keyword arguments from :meth:`JsonRef.replace_refs`. Any other keyword arguments will be passed to :func:`json.loads`
2.30775
2.234097
1.032968
if loader is None:
    loader = jsonloader
if base_uri is None:
    base_uri = uri

return JsonRef.replace_refs(
    loader(uri),
    base_uri=base_uri,
    loader=loader,
    jsonschema=jsonschema,
    load_on_repr=load_on_repr,
)
def load_uri(uri, base_uri=None, loader=None, jsonschema=False, load_on_repr=True)
Load JSON data from ``uri`` with JSON references proxied to their referent data. :param uri: URI to fetch the JSON from :param kwargs: This function takes any of the keyword arguments from :meth:`JsonRef.replace_refs`
2.21825
2.157989
1.027925
kwargs["cls"] = _ref_encoder_factory(kwargs.get("cls", json.JSONEncoder)) return json.dumps(obj, **kwargs)
def dumps(obj, **kwargs)
Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON formatted string. `JsonRef` objects will be dumped as the original reference object they were created from. :param obj: Object to serialize :param kwargs: Keyword arguments are the same as to :func:`json.dumps`
4.425792
5.519018
0.801917
store = kwargs.setdefault("_store", _URIDict())
base_uri, frag = urlparse.urldefrag(kwargs.get("base_uri", ""))
store_uri = None  # If this does not get set, we won't store the result
if not frag and not _recursive:
    store_uri = base_uri
try:
    if kwargs.get("jsonschema") and isinstance(obj["id"], basestring):
        kwargs["base_uri"] = urlparse.urljoin(kwargs.get("base_uri", ""), obj["id"])
        store_uri = kwargs["base_uri"]
except (TypeError, LookupError):
    pass
try:
    if not isinstance(obj["$ref"], basestring):
        raise TypeError
except (TypeError, LookupError):
    pass
else:
    return cls(obj, **kwargs)
# If our obj was not a json reference object, iterate through it,
# replacing children with JsonRefs
kwargs["_recursive"] = True
path = list(kwargs.pop("_path", ()))
if isinstance(obj, Mapping):
    obj = type(obj)(
        (k, cls.replace_refs(v, _path=path + [k], **kwargs))
        for k, v in iteritems(obj)
    )
elif isinstance(obj, Sequence) and not isinstance(obj, basestring):
    obj = type(obj)(
        cls.replace_refs(v, _path=path + [i], **kwargs)
        for i, v in enumerate(obj)
    )
if store_uri is not None:
    store[store_uri] = obj
return obj
def replace_refs(cls, obj, _recursive=False, **kwargs)
Returns a deep copy of `obj` with all contained JSON reference objects replaced with :class:`JsonRef` instances. :param obj: If this is a JSON reference object, a :class:`JsonRef` instance will be created. If `obj` is not a JSON reference object, a deep copy of it will be created with all contained JSON reference objects replaced by :class:`JsonRef` instances :param base_uri: URI to resolve relative references against :param loader: Callable that takes a URI and returns the parsed JSON (defaults to global ``jsonloader``, a :class:`JsonLoader` instance) :param jsonschema: Flag to turn on `JSON Schema mode <http://json-schema.org/latest/json-schema-core.html#anchor25>`_. 'id' keyword changes the `base_uri` for references contained within the object :param load_on_repr: If set to ``False``, :func:`repr` call on a :class:`JsonRef` object will not cause the reference to be loaded if it hasn't already. (defaults to ``True``)
3.169163
2.943108
1.076808
# Do only split at single forward slashes which are not prefixed by a caret
parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
for part in parts:
    # Restore escaped slashes and carets
    replacements = {r"^/": r"/", r"^^": r"^"}
    part = re.sub(
        "|".join(re.escape(key) for key in replacements.keys()),
        lambda k: replacements[k.group(0)],
        part,
    )
    if isinstance(document, Sequence):
        # Try to turn an array index to an int
        try:
            part = int(part)
        except ValueError:
            pass
    try:
        document = document[part]
    except (TypeError, LookupError) as e:
        self._error("Unresolvable JSON pointer: %r" % pointer, cause=e)
return document
def resolve_pointer(self, document, pointer)
Resolve a json pointer ``pointer`` within the referenced ``document``. :argument document: the referent document :argument str pointer: a json pointer URI fragment to resolve within it
4.597179
4.355319
1.055532
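Pointer resolution is a walk over the fragment's parts; a standalone sketch of the core loop (omitting the caret-escape handling the full version adds):

from urllib.parse import unquote

def resolve_pointer_simple(document, pointer):
    # Walk '/a/0/b' style pointers, coercing parts to ints for sequences.
    parts = unquote(pointer.lstrip('/')).split('/') if pointer else []
    for part in parts:
        if isinstance(document, list):
            try:
                part = int(part)
            except ValueError:
                pass
        document = document[part]
    return document

print(resolve_pointer_simple({'a': [{'b': 1}]}, '/a/0/b'))  # 1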
if not callable(fp.write):
    raise TypeError('fp.write not callable')
fp_write = fp.write
__encode_value(fp_write, obj, {}, container_count, sort_keys, no_float32, default)
def dump(obj, fp, container_count=False, sort_keys=False, no_float32=True, default=None)
Writes the given object as UBJSON to the provided file-like object

Args:
    obj: The object to encode
    fp: write([size])-able object
    container_count (bool): Specify length for container types (including
        for empty ones). This can aid decoding speed depending on
        implementation but requires a bit more space and encoding speed
        could be reduced if getting length of any of the containers is
        expensive.
    sort_keys (bool): Sort keys of mappings
    no_float32 (bool): Never use float32 to store float numbers (other
        than for zero). Disabling this might save space at the loss of
        precision.
    default (callable): Called for objects which cannot be serialised.
        Should return a UBJSON-encodable version of the object or raise
        an EncoderException.

Raises:
    EncoderException: If an encoding failure occurred.

The following Python types and interfaces (ABCs) are supported (as are
any subclasses):

+------------------------------+-----------------------------------+
| Python                       | UBJSON                            |
+==============================+===================================+
| (3) str                      | string                            |
| (2) unicode                  |                                   |
+------------------------------+-----------------------------------+
| None                         | null                              |
+------------------------------+-----------------------------------+
| bool                         | true, false                       |
+------------------------------+-----------------------------------+
| (3) int                      | uint8, int8, int16, int32, int64, |
| (2) int, long                | high_precision                    |
+------------------------------+-----------------------------------+
| float                        | float32, float64, high_precision  |
+------------------------------+-----------------------------------+
| Decimal                      | high_precision                    |
+------------------------------+-----------------------------------+
| (3) bytes, bytearray         | array (type, uint8)               |
| (2) str                      | array (type, uint8)               |
+------------------------------+-----------------------------------+
| (3) collections.abc.Mapping  | object                            |
| (2) collections.Mapping      |                                   |
+------------------------------+-----------------------------------+
| (3) collections.abc.Sequence | array                             |
| (2) collections.Sequence     |                                   |
+------------------------------+-----------------------------------+

Notes:
- Items are resolved in the order of this table, e.g. if the item
  implements both Mapping and Sequence interfaces, it will be encoded
  as a mapping.
- None and bool do not use an isinstance check
- Numbers in brackets denote Python version.
- Only unicode strings in Python 2 are encoded as strings, i.e. for
  compatibility with e.g. Python 3 one MUST NOT use str in Python 2
  (as that will be interpreted as a byte array).
- Mapping keys have to be strings: str for Python 3 and unicode or str
  in Python 2.
- float conversion rules (depending on no_float32 setting):
  float32: 1.18e-38 <= abs(value) <= 3.4e38 or value == 0
  float64: 2.23e-308 <= abs(value) < 1.8e308
  For other values Decimal is used.
4.625608
6.322896
0.731565
with BytesIO() as fp:
    dump(obj, fp, container_count=container_count, sort_keys=sort_keys, no_float32=no_float32,
         default=default)
    return fp.getvalue()
def dumpb(obj, container_count=False, sort_keys=False, no_float32=True, default=None)
Returns the given object as UBJSON in a bytes instance. See dump() for available arguments.
1.695409
1.937519
0.875041
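A round-trip sketch with the py-ubjson module these helpers come from:

import ubjson

payload = {'name': 'demo', 'values': [1, 2.5, None, True]}
encoded = ubjson.dumpb(payload, sort_keys=True)  # bytes on the wire
print(ubjson.loadb(encoded) == payload)          # True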
if version is not LATEST:
    return version
resp = urlopen('https://pypi.python.org/pypi/setuptools/json')
with contextlib.closing(resp):
    try:
        charset = resp.info().get_content_charset()
    except Exception:
        # Python 2 compat; assume UTF-8
        charset = 'UTF-8'
    reader = codecs.getreader(charset)
    doc = json.load(reader(resp))
return str(doc['info']['version'])
def _resolve_version(version)
Resolve LATEST version
3.727931
3.481554
1.070767
if object_pairs_hook is None and object_hook is None:
    object_hook = __object_hook_noop
if not callable(fp.read):
    raise TypeError('fp.read not callable')
fp_read = fp.read
marker = fp_read(1)
try:
    try:
        return __METHOD_MAP[marker](fp_read, marker)
    except KeyError:
        pass
    if marker == ARRAY_START:
        return __decode_array(fp_read, bool(no_bytes), object_hook, object_pairs_hook,
                              intern_object_keys)
    elif marker == OBJECT_START:
        return __decode_object(fp_read, bool(no_bytes), object_hook, object_pairs_hook,
                               intern_object_keys)
    else:
        raise DecoderException('Invalid marker')
except DecoderException as ex:
    raise_from(DecoderException(ex.args[0], fp), ex)
def load(fp, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False)
Decodes and returns UBJSON from the given file-like object

Args:
    fp: read([size])-able object
    no_bytes (bool): If set, typed UBJSON arrays (uint8) will not be
        converted to a bytes instance and instead treated like any other
        array (i.e. result in a list).
    object_hook (callable): Called with the result of any object literal
        decoded (instead of dict).
    object_pairs_hook (callable): Called with the result of any object
        literal decoded with an ordered list of pairs (instead of dict).
        Takes precedence over object_hook.
    intern_object_keys (bool): If set, object keys are interned which can
        provide a memory saving when many repeated keys are used. NOTE:
        This is not supported in Python 2 (since interning does not apply
        to unicode) and will be ignored.

Returns:
    Decoded object

Raises:
    DecoderException: If a decoding failure occurred.

UBJSON types are mapped to Python types as follows. Numbers in brackets
denote Python version.

+----------------------------------+---------------+
| UBJSON                           | Python        |
+==================================+===============+
| object                           | dict          |
+----------------------------------+---------------+
| array                            | list          |
+----------------------------------+---------------+
| string                           | (3) str       |
|                                  | (2) unicode   |
+----------------------------------+---------------+
| uint8, int8, int16, int32, int64 | (3) int       |
|                                  | (2) int, long |
+----------------------------------+---------------+
| float32, float64                 | float         |
+----------------------------------+---------------+
| high_precision                   | Decimal       |
+----------------------------------+---------------+
| array (typed, uint8)             | (3) bytes     |
|                                  | (2) str       |
+----------------------------------+---------------+
| true                             | True          |
+----------------------------------+---------------+
| false                            | False         |
+----------------------------------+---------------+
| null                             | None          |
+----------------------------------+---------------+
2.712582
2.803823
0.967458
with BytesIO(chars) as fp:
    return load(fp, no_bytes=no_bytes, object_hook=object_hook,
                object_pairs_hook=object_pairs_hook, intern_object_keys=intern_object_keys)
def loadb(chars, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False)
Decodes and returns UBJSON from the given bytes or bytearray object. See load() for available arguments.
1.80304
2.148424
0.839238
# start off with STATUS_SUCCESS as a baseline
status = NtStatusCodes.STATUS_SUCCESS

error_code = self['errorCode']
if error_code.isValue:
    # ASN.1 Integer is stored as a signed integer, we need to
    # convert it to an unsigned integer
    status = ctypes.c_uint32(error_code).value

if status != NtStatusCodes.STATUS_SUCCESS:
    raise NTStatusException(status)
def check_error_code(self)
For CredSSP version 3 or newer, the server can respond with an NtStatus error code with details of what error occurred. This method checks whether the error code exists and throws an NTStatusException if it is not STATUS_SUCCESS.
7.125947
5.572227
1.278833
ntlm_oid = GSSAPIContext._AUTH_MECHANISMS['ntlm']
ntlm_mech = gssapi.OID.from_int_seq(ntlm_oid)

# GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH
# github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68
reset_mech = gssapi.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3")

try:
    # we don't actually care about the account used here so just use
    # a random username and password
    ntlm_context = GSSAPIContext._get_security_context(
        gssapi.NameType.user, ntlm_mech, "http@server", "username", "password"
    )
    ntlm_context.step()
    set_sec_context_option(reset_mech, context=ntlm_context, value=b"\x00" * 4)
except gssapi.exceptions.GSSError as exc:
    # failed to init NTLM and verify gss-ntlmssp is available, this
    # means NTLM is either not available or won't work
    # (not gss-ntlmssp) so we return kerberos as the only available
    # mechanism for the GSSAPI Context
    log.debug("Failed to init test NTLM context with GSSAPI: %s" % str(exc))
    return ['kerberos']
else:
    return ['auto', 'kerberos', 'ntlm']
def get_mechs_available()
Returns a list of auth mechanisms that are available to the local GSSAPI instance. Because we are interacting with Windows, we only care whether SPNEGO, Kerberos and NTLM are available, where NTLM is the only wildcard that may not be available by default. The only NTLM implementation that works properly is gss-ntlmssp, and part of this test is to verify that the gss-ntlmssp OID GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented, which is required for SPNEGO and NTLM to work properly. :return: list - A list of supported mechs available in the installed version of GSSAPI
5.404276
4.366079
1.237787
ts_request = TSRequest()

if auth_token is not None:
    nego_token = NegoToken()
    nego_token['negoToken'] = auth_token
    ts_request['negoTokens'].append(nego_token)

if nonce is not None:
    ts_request['clientNonce'] = nonce
    hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + nonce + public_key
    pub_value = hashlib.sha256(hash_input).digest()
else:
    pub_value = public_key

enc_public_key = context.wrap(pub_value)
ts_request['pubKeyAuth'] = enc_public_key

return encoder.encode(ts_request)
def _build_pub_key_auth(self, context, nonce, auth_token, public_key)
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3 https://msdn.microsoft.com/en-us/library/cc226791.aspx This step sends the final SPNEGO token to the server if required and computes the value for the pubKeyAuth field for the protocol version negotiated. The format of the pubKeyAuth field depends on the version that the server supports. For versions 2 to 4: The pubKeyAuth field is just wrapped using the authenticated context. For versions 5 to 6: The pubKeyAuth is a sha256 hash of the server's public key plus a nonce and a magic string value. This hash is wrapped using the authenticated context, and the nonce used in the hash calculation is added to the TSRequest. :param context: The authenticated context :param nonce: If version 5+, the nonce to use in the hash :param auth_token: If NTLM, this is the last msg (authenticate msg) to send in the same request :param public_key: The server's public key :return: The TSRequest as a byte string to send to the server
4.188164
3.760977
1.113584
if nonce is not None:
    hash_input = b"CredSSP Server-To-Client Binding Hash\x00" + nonce + public_key
    actual = hashlib.sha256(hash_input).digest()
    expected = server_key
else:
    first_byte = struct.unpack("B", server_key[0:1])[0]
    actual_first_byte = struct.pack("B", first_byte - 1)
    actual = actual_first_byte + server_key[1:]
    expected = public_key

if actual != expected:
    raise AuthenticationException("Could not verify key sent from the "
                                  "server, potential man in the "
                                  "middle attack")
def _verify_public_keys(self, nonce, server_key, public_key)
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 4 https://msdn.microsoft.com/en-us/library/cc226791.aspx The rules vary depending on the server version For version 2 to 4: After the server received the public key in Step 3 it verifies the key with what was in the handshake. After the verification it then adds 1 to the first byte representing the public key and encrypts the bytes result by using the authentication protocol's encryption services. This method does the opposite where it will decrypt the public key returned from the server and subtract the first byte by 1 to compare with the public key we sent originally. For versions 5 to 6: A hash is calculated with the magic string value, the nonce that was sent to the server and the public key that was used. This is verified against the returned server public key. :param nonce: If version 5+, the nonce used in the hash calculations :param server_key: The unwrapped value returned in the TSRequest['pubKeyAuth'] field. :param public_key: The actual public key of the server
4.654635
4.457897
1.044133
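For versions 5+, both sides hash a magic string, the client nonce, and the server's DER public key; a sketch of the two binding hashes with hashlib (the nonce and key bytes are placeholders):

import hashlib
import os

nonce = os.urandom(32)           # placeholder client nonce
public_key = b'...DER bytes...'  # placeholder SubjectPublicKey

client_hash = hashlib.sha256(
    b"CredSSP Client-To-Server Binding Hash\x00" + nonce + public_key).digest()
server_hash = hashlib.sha256(
    b"CredSSP Server-To-Client Binding Hash\x00" + nonce + public_key).digest()
# The client sends client_hash (wrapped) and expects server_hash in return.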
ts_password = TSPasswordCreds()
ts_password['domainName'] = context.domain.encode('utf-16-le')
ts_password['userName'] = context.username.encode('utf-16-le')
ts_password['password'] = context.password.encode('utf-16-le')

ts_credentials = TSCredentials()
ts_credentials['credType'] = ts_password.CRED_TYPE
ts_credentials['credentials'] = encoder.encode(ts_password)

ts_request = TSRequest()
enc_credentials = context.wrap(encoder.encode(ts_credentials))
ts_request['authInfo'] = enc_credentials

return encoder.encode(ts_request)
def _get_encrypted_credentials(self, context)
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 5 https://msdn.microsoft.com/en-us/library/cc226791.aspx After the client has verified the server's authenticity, it encrypts the user's credentials with the authentication protocol's encryption services. The resulting value is encapsulated in the authInfo field of the TSRequest structure and sent over the encrypted TLS channel to the server :param context: The authenticated security context :return: The encrypted TSRequest that contains the user's credentials
3.087108
2.74857
1.123169
length = self.tls_connection.send(data)

encrypted_data = b''
counter = 0

while True:
    try:
        encrypted_chunk = self.tls_connection.bio_read(self.BIO_BUFFER_SIZE)
    except SSL.WantReadError:
        break
    encrypted_data += encrypted_chunk

    # in case of a borked TLS connection, break the loop if the current
    # buffer counter is > the length of the original message plus the
    # size of the buffer (to be careful)
    counter += self.BIO_BUFFER_SIZE
    if counter > length + self.BIO_BUFFER_SIZE:
        break

return encrypted_data
def wrap(self, data)
Encrypts the data in preparation for sending to the server. The data is encrypted using the TLS channel negotiated between the client and the server. :param data: a byte string of data to encrypt :return: a byte string of the encrypted data
5.21756
4.95497
1.052995
length = self.tls_connection.bio_write(encrypted_data)

data = b''
counter = 0

while True:
    try:
        data_chunk = self.tls_connection.recv(self.BIO_BUFFER_SIZE)
    except SSL.WantReadError:
        break
    data += data_chunk

    counter += self.BIO_BUFFER_SIZE
    if counter > length:
        break

return data
def unwrap(self, encrypted_data)
Decrypts the data sent by the server using the TLS channel negotiated between the client and the server. :param encrypted_data: the byte string of the encrypted data :return: a byte string of the decrypted data
3.608464
3.621104
0.996509
public_key = cert.get_pubkey()
cryptographic_key = public_key.to_cryptography_key()
subject_public_key = cryptographic_key.public_bytes(Encoding.DER, PublicFormat.PKCS1)
return subject_public_key
def _get_subject_public_key(cert)
Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo field of the server's certificate. This is used in the server verification steps to thwart MitM attacks. :param cert: X509 certificate from pyOpenSSL .get_peer_certificate() :return: byte string of the asn.1 DER encoded SubjectPublicKey field
2.819957
2.959628
0.952808
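The same extraction can be reproduced with the cryptography package alone; here against a freshly generated RSA key instead of a peer certificate, so the sketch runs offline:

from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
# PKCS1 DER is the raw SubjectPublicKey field used in the verification step.
subject_public_key = key.public_key().public_bytes(Encoding.DER, PublicFormat.PKCS1)
print(len(subject_public_key))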
# NOTE: Prevents QueryInterface error caused by getting a URL
# while switched to an iframe
self.switch_to_default_content()

self.get(_KindleCloudReaderBrowser._CLOUD_READER_URL)
if self.title == u'Problem loading page':
    raise ConnectionError

# Wait for either the login page or the reader to load
login_or_reader_loaded = lambda br: (
    br.find_elements_by_id('amzn_kcr') or
    br.find_elements_by_id('KindleLibraryIFrame'))
self._wait(5).until(login_or_reader_loaded)

try:
    self._wait(5).until(lambda br: br.title == u'Amazon.com Sign In')
except TimeoutException:
    raise BrowserError('Failed to load Kindle Cloud Reader.')
else:
    self._login()
def _to_reader_home(self)
Navigate to the Cloud Reader library page.

Raises:
    BrowserError: If the KCR homepage could not be loaded.
    ConnectionError: If there was a connection error.
8.729772
7.145676
1.221686
if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL):
    raise BrowserError(
        'Current url "%s" is not a signin url ("%s")' %
        (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL))

email_field_loaded = lambda br: br.find_elements_by_id('ap_email')
self._wait().until(email_field_loaded)

tries = 0
while tries < max_tries:
    # Enter the username
    email_elem = self.find_element_by_id('ap_email')
    email_elem.clear()
    email_elem.send_keys(self._uname)

    # Enter the password
    pword_elem = self.find_element_by_id('ap_password')
    pword_elem.clear()
    pword_elem.send_keys(self._pword)

    def creds_entered(_):
        email_ok = email_elem.get_attribute('value') == self._uname
        pword_ok = pword_elem.get_attribute('value') == self._pword
        return email_ok and pword_ok

    kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader'
    try:
        self._wait(5).until(creds_entered)
        self.find_element_by_id('signInSubmit-input').click()
        self._wait(5).until(kcr_page_loaded)
    except TimeoutException:
        tries += 1
    else:
        return

raise LoginError
def _login(self, max_tries=2)
Logs in to Kindle Cloud Reader.

Args:
    max_tries: The maximum number of login attempts that will be made.

Raises:
    BrowserError: If the method is called while the browser is not at a
        signin URL.
    LoginError: If login is unsuccessful after `max_tries` attempts.
2.948237
2.537083
1.162058
reader_frame = 'KindleReaderIFrame'

frame_loaded = lambda br: br.find_elements_by_id(reader_frame)
self._wait().until(frame_loaded)

self.switch_to.frame(reader_frame)  # pylint: disable=no-member

reader_loaded = lambda br: br.find_elements_by_id('kindleReader_header')
self._wait().until(reader_loaded)
def _to_reader_frame(self)
Navigate to the KindleReader iframe.
5.683253
3.687579
1.541188
# Wait for the Module Manager to load
mod_mgr_script = ur"return window.hasOwnProperty('KindleModuleManager');"
mod_mgr_loaded = lambda br: br.execute_script(mod_mgr_script)
self._wait(5).until(mod_mgr_loaded)

# Wait for the DB Client to load
db_client_script = dedent(ur)
db_client_loaded = lambda br: br.execute_async_script(db_client_script)
self._wait(5).until(db_client_loaded)
def _wait_for_js(self)
Wait for the Kindle Cloud Reader JS modules to initialize. These modules provide the interface used to execute API queries.
4.932334
4.453632
1.107486
api_call = dedent() % {
    'api_call': function_name,
    'args': ', '.join(args)
}
script = '\n'.join((api.API_SCRIPT, api_call))
try:
    return self._browser.execute_async_script(script)
except TimeoutException:
    # FIXME: KCR will occasionally not load the library and fall over
    raise APIError
def _get_api_call(self, function_name, *args)
Runs an api call with javascript-formatted arguments.

Args:
    function_name: The name of the KindleAPI call to run.
    *args: Javascript-formatted arguments to pass to the API call.

Returns:
    The result of the API call.

Raises:
    APIError: If the API call fails or times out.
9.309758
9.288346
1.002305
kbm = self._get_api_call('get_book_metadata', '"%s"' % asin)
return KindleCloudReaderAPI._kbm_to_book(kbm)
def get_book_metadata(self, asin)
Returns a book's metadata.

Args:
    asin: The ASIN of the book to be queried.

Returns:
    A `KindleBook` instance corresponding to the book associated with
    `asin`.
11.36095
10.086068
1.1264
kbp = self._get_api_call('get_book_progress', '"%s"' % asin)
return KindleCloudReaderAPI._kbp_to_progress(kbp)
def get_book_progress(self, asin)
Returns the progress data available for a book.

NOTE: A summary of the two progress formats can be found in the
docstring for `ReadingProgress`.

Args:
    asin: The ASIN of the book to be queried.

Returns:
    A `ReadingProgress` instance corresponding to the book associated
    with `asin`.
9.284286
10.145611
0.915104
kbp_dict = self._get_api_call('get_library_progress')
return {asin: KindleCloudReaderAPI._kbp_to_progress(kbp)
        for asin, kbp in kbp_dict.iteritems()}
def get_library_progress(self)
Returns the reading progress for all books in the kindle library.

Returns:
    A mapping of ASINs to `ReadingProgress` instances corresponding to
    the books in the current user's library.
9.092355
6.729393
1.35114
inst = KindleCloudReaderAPI(*args, **kwargs)
try:
    yield inst
except Exception:
    raise
finally:
    inst.close()
def get_instance(*args, **kwargs)
Context manager for an instance of `KindleCloudReaderAPI`.
9.110911
2.895947
3.14609
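Since get_instance yields the API object and guarantees close() on exit, it reads naturally as a contextlib.contextmanager-decorated factory. A usage sketch follows; the constructor arguments are placeholders and an assumption about KindleCloudReaderAPI's signature.

# Hypothetical usage of the context manager; credentials are placeholders.
with KindleCloudReaderAPI.get_instance('user@example.com', 'hunter2') as api:
    library_progress = api.get_library_progress()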
path = os.path.abspath(path)
if os.path.isdir(path):
    config, wordlists = _load_data(path)
elif os.path.isfile(path):
    config = _load_config(path)
    wordlists = {}
else:
    raise InitializationError('File or directory not found: {0}'.format(path))
for name, wordlist in wordlists.items():
    if name in config:
        raise InitializationError("Conflict: list {!r} is defined both in config "
                                  "and in *.txt file. If it's a {!r} list, "
                                  "you should remove it from config."
                                  .format(name, _CONF.TYPE.WORDS))
    config[name] = wordlist
return config
def load_config(path)
Loads configuration from a path. Path can be a json file, or a directory containing config.json and zero or more *.txt files with word lists or phrase lists. Returns config dict. Raises InitializationError when something is wrong.
4.047245
3.698191
1.094385
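A brief usage sketch: load_config accepts either a single JSON file or a directory that bundles config.json with *.txt word lists. The import path and directory layout below are assumptions for illustration.

# Hypothetical usage; the import path is an assumption.
from coolname.loader import load_config

config = load_config('/path/to/config_dir')  # dir with config.json + *.txt
print(sorted(config.keys()))                 # one entry per list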
path = os.path.abspath(path)
if not os.path.isdir(path):
    raise InitializationError('Directory not found: {0}'.format(path))
wordlists = {}
for file_name in os.listdir(path):
    if os.path.splitext(file_name)[1] != '.txt':
        continue
    file_path = os.path.join(path, file_name)
    name = os.path.splitext(os.path.split(file_path)[1])[0]
    try:
        with codecs.open(file_path, encoding='utf-8') as file:
            wordlists[name] = _load_wordlist(name, file)
    except OSError as ex:
        raise InitializationError('Failed to read {}: {}'.format(file_path, ex))
config = _load_config(os.path.join(path, 'config.json'))
return (config, wordlists)
def _load_data(path)
Loads data from a directory. Returns tuple (config_dict, wordlists). Raises Exception on failure (e.g. if data is corrupted).
2.073495
1.880661
1.102535
match = _OPTION_REGEX.match(line)
if not match:
    raise ValueError('Invalid syntax')
for name, type_ in _OPTIONS:
    if name == match.group(1):
        return name, type_(match.group(2))
raise ValueError('Unknown option')
def _parse_option(line)
Parses option line. Returns (name, value). Raises ValueError on invalid syntax or unknown option.
2.946521
2.65659
1.109137
items = []
max_length = None
multiword = False
multiword_start = None
number_of_words = None
for i, line in enumerate(stream, start=1):
    line = line.strip()
    if not line or line.startswith('#'):
        continue
    # Is it an option line, e.g. 'max_length = 10'?
    if '=' in line:
        if items:
            raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} '
                                     '(options must be defined before words)'
                                     .format(name, i, line))
        try:
            option, option_value = _parse_option(line)
        except ValueError as ex:
            raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} '
                                     '({})'
                                     .format(name, i, line, ex))
        if option == _CONF.FIELD.MAX_LENGTH:
            max_length = option_value
        elif option == _CONF.FIELD.NUMBER_OF_WORDS:
            number_of_words = option_value
        continue  # pragma: no cover
    # Parse words
    if not multiword and _WORD_REGEX.match(line):
        if max_length is not None and len(line) > max_length:
            raise ConfigurationError('Word is too long at list {!r} line {}: {!r}'
                                     .format(name, i, line))
        items.append(line)
    elif _PHRASE_REGEX.match(line):
        if not multiword:
            multiword = True
            multiword_start = len(items)
        phrase = tuple(line.split(' '))
        if number_of_words is not None and len(phrase) != number_of_words:
            raise ConfigurationError('Phrase has {} word(s) (while number_of_words={}) '
                                     'at list {!r} line {}: {!r}'
                                     .format(len(phrase), number_of_words, name, i, line))
        if max_length is not None and sum(len(x) for x in phrase) > max_length:
            raise ConfigurationError('Phrase is too long at list {!r} line {}: {!r}'
                                     .format(name, i, line))
        items.append(phrase)
    else:
        raise ConfigurationError('Invalid syntax at list {!r} line {}: {!r}'
                                 .format(name, i, line))
if multiword:
    # If in phrase mode, convert everything to tuples
    for i in range(0, multiword_start):
        items[i] = (items[i], )
    result = {
        _CONF.FIELD.TYPE: _CONF.TYPE.PHRASES,
        _CONF.FIELD.PHRASES: items
    }
    if number_of_words is not None:
        result[_CONF.FIELD.NUMBER_OF_WORDS] = number_of_words
else:
    result = {
        _CONF.FIELD.TYPE: _CONF.TYPE.WORDS,
        _CONF.FIELD.WORDS: items
    }
    if max_length is not None:
        result[_CONF.FIELD.MAX_LENGTH] = max_length
return result
def _load_wordlist(name, stream)
Loads list of words or phrases from file. Returns "words" or "phrases" dictionary, the same as used in config. Raises Exception if file is missing or invalid.
2.090771
2.079418
1.005459
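Based on the parsing rules above, a word-list file accepted by _load_wordlist looks something like the following. The file name and contents are invented for illustration; the only structural rules taken from the code are that options precede entries and that '#' lines and blank lines are skipped.

# adjectives.txt -- options first, then one word (or phrase) per line
max_length = 10

# comments and blank lines are ignored
quick
brave
shiny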
# Have we done it already?
try:
    return results[current]
except KeyError:
    pass

# Check recursion depth and detect loops
if current in stack:
    raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack))
if len(stack) > 99:
    raise ConfigurationError('Rule {!r} is too deep'.format(stack[0]))

# Track recursion depth
stack.append(current)
try:
    # Check what kind of list we have
    listdef = config[current]
    list_type = listdef[_CONF.FIELD.TYPE]
    # 1. List of words
    if list_type == _CONF.TYPE.WORDS:
        results[current] = WordList(listdef['words'])
    # List of phrases
    elif list_type == _CONF.TYPE.PHRASES:
        results[current] = PhraseList(listdef['phrases'])
    # 2. Simple list of lists
    elif list_type == _CONF.TYPE.NESTED:
        results[current] = NestedList([_create_lists(config, results, x, stack,
                                                     inside_cartesian=inside_cartesian)
                                       for x in listdef[_CONF.FIELD.LISTS]])
    # 3. Cartesian list of lists
    elif list_type == _CONF.TYPE.CARTESIAN:
        if inside_cartesian is not None:
            raise ConfigurationError("Cartesian list {!r} contains another Cartesian list "
                                     "{!r}. Nested Cartesian lists are not allowed."
                                     .format(inside_cartesian, current))
        results[current] = CartesianList([_create_lists(config, results, x, stack,
                                                        inside_cartesian=current)
                                          for x in listdef[_CONF.FIELD.LISTS]])
    # 4. Scalar
    elif list_type == _CONF.TYPE.CONST:
        results[current] = Scalar(listdef[_CONF.FIELD.VALUE])
    # Unknown type
    else:
        raise InitializationError("Unknown list type: {!r}".format(list_type))
    # Return the result
    return results[current]
finally:
    stack.pop()
def _create_lists(config, results, current, stack, inside_cartesian=None)
An ugly recursive method to transform config dict into a tree of AbstractNestedList.
2.548071
2.527432
1.008166
lst = self._lists[pattern]
while True:
    result = lst[self._randrange(lst.length)]
    # 1. Check that there are no duplicates
    # 2. Check that there are no duplicate prefixes
    # 3. Check max slug length
    n = len(result)
    if (self._ensure_unique and len(set(result)) != n or
            self._check_prefix and
            len(set(x[:self._check_prefix] for x in result)) != n or
            self._max_slug_length and
            sum(len(x) for x in result) + n - 1 > self._max_slug_length):
        continue
    return result
def generate(self, pattern=None)
Generates and returns random name as a list of strings.
4.360669
4.31852
1.00976
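For context, this generate() method is what coolname's public helpers sit on top of. A small usage sketch of that public API follows; the printed outputs are illustrative, since results are random.

# Usage sketch of the public API built on generate(); output is illustrative.
from coolname import generate, generate_slug

print(generate())        # e.g. ['quick', 'brave', 'fox']
print(generate_slug(2))  # e.g. 'brave-fox'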
return self._lists[pattern]._dump(stream, '', object_ids=object_ids)
def _dump(self, stream, pattern=None, object_ids=False)
Dumps current tree into a text stream.
11.535301
10.45524
1.103303
# (field_name, field_value, predicate, warning_msg, exception_msg)
# predicate(g) is a function that returns True if generated combination g
# must be rejected, see checks in generate()
checks = []

# ensure_unique can lead to infinite loops for some tiny erroneous configs
if self._ensure_unique:
    checks.append((
        _CONF.FIELD.ENSURE_UNIQUE,
        self._ensure_unique,
        lambda g: len(set(g)) != len(g),
        '{generate} may be slow because a significant fraction of combinations contain repeating words and {field_name} is set',  # noqa
        'Impossible to generate with {field_name}'
    ))

# max_slug_length can easily slow down or block generation if set too small
if self._max_slug_length:
    checks.append((
        _CONF.FIELD.MAX_SLUG_LENGTH,
        self._max_slug_length,
        lambda g: sum(len(x) for x in g) + len(g) - 1 > self._max_slug_length,
        '{generate} may be slow because a significant fraction of combinations exceed {field_name}={field_value}',  # noqa
        'Impossible to generate with {field_name}={field_value}'
    ))

# Perform the relevant checks for all generators, starting from 'all'
n = 100
warning_threshold = 20  # fail probability: 0.04 for 2 attempts, 0.008 for 3 attempts, etc.
for lst_id, lst in sorted(self._lists.items(), key=lambda x: '' if x is None else str(x)):
    context = {'generate': 'coolname.generate({})'.format('' if lst_id is None else repr(lst_id))}
    # For each generator, perform checks
    for field_name, field_value, predicate, warning_msg, exception_msg in checks:
        context.update({'field_name': field_name, 'field_value': field_value})
        bad_count = 0
        for i in range(n):
            g = lst[randrange(lst.length)]
            if predicate(g):
                bad_count += 1
        if bad_count >= n:
            raise ConfigurationError(exception_msg.format(**context))
        elif bad_count >= warning_threshold:
            import warnings
            warnings.warn(warning_msg.format(**context))
def _check_not_hanging(self)
Rough check that generate() will not hang or be very slow.

Raises ConfigurationError if generate() spends too much time in the retry
loop. Issues a warnings.warn() if there is a risk of slowdown.
5.050865
4.863075
1.038616
activity_table = get_activity_table(schema=schema)
query = (
    activity_table
    .update()
    .values(
        old_data=(
            activity_table.c.old_data +
            sa.cast(sa.func.json_build_object(
                column_name,
                func(
                    activity_table.c.old_data[column_name],
                    activity_table
                )
            ), JSONB)
        ),
        changed_data=(
            activity_table.c.changed_data +
            sa.cast(sa.func.json_build_object(
                column_name,
                func(
                    activity_table.c.changed_data[column_name],
                    activity_table
                )
            ), JSONB)
        )
    )
    .where(activity_table.c.table_name == table)
)
return conn.execute(query)
def alter_column(conn, table, column_name, func, schema=None)
Run given callable against given table and given column in activity table
jsonb data columns. This function is useful when you want to reflect type
changes in your schema to the activity table.

In the following example we change the data type of User's age column
from string to integer.

::

    from alembic import op
    from postgresql_audit import alter_column


    def upgrade():
        op.alter_column(
            'user',
            'age',
            type_=sa.Integer
        )

        alter_column(
            op,
            'user',
            'age',
            lambda value, activity_table: sa.cast(value, sa.Integer)
        )

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to run the column name changes against
:param column_name:
    Name of the column to run callable against
:param func:
    A callable to run against specific column in activity table jsonb
    data columns. The callable should take two parameters: the jsonb
    value corresponding to given column_name, and the activity table
    object.
:param schema:
    Optional name of schema to use.
2.144806
2.030415
1.056339
activity_table = get_activity_table(schema=schema)
query = (
    activity_table
    .update()
    .values(
        old_data=jsonb_change_key_name(
            activity_table.c.old_data,
            old_column_name,
            new_column_name
        ),
        changed_data=jsonb_change_key_name(
            activity_table.c.changed_data,
            old_column_name,
            new_column_name
        )
    )
    .where(activity_table.c.table_name == table)
)
return conn.execute(query)
def change_column_name(conn, table, old_column_name, new_column_name, schema=None)
Changes given `activity` jsonb data column key. This function is useful
when you want to reflect column name changes to activity table.

::

    from alembic import op
    from postgresql_audit import change_column_name


    def upgrade():
        op.alter_column(
            'my_table',
            'my_column',
            new_column_name='some_column'
        )

        change_column_name(op, 'my_table', 'my_column', 'some_column')

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to run the column name changes against
:param old_column_name:
    Name of the column to change
:param new_column_name:
    New column name
:param schema:
    Optional name of schema to use.
2.322101
2.389943
0.971613
activity_table = get_activity_table(schema=schema)
data = {column_name: default_value}
query = (
    activity_table
    .update()
    .values(
        old_data=sa.case(
            [
                (
                    sa.cast(activity_table.c.old_data, sa.Text) != '{}',
                    activity_table.c.old_data + data
                ),
            ],
            else_=sa.cast({}, JSONB)
        ),
        changed_data=sa.case(
            [
                (
                    sa.and_(
                        sa.cast(
                            activity_table.c.changed_data,
                            sa.Text
                        ) != '{}',
                        activity_table.c.verb != 'update'
                    ),
                    activity_table.c.changed_data + data
                )
            ],
            else_=activity_table.c.changed_data
        ),
    )
    .where(activity_table.c.table_name == table)
)
return conn.execute(query)
def add_column(conn, table, column_name, default_value=None, schema=None)
Adds given column to `activity` table jsonb data columns.

In the following example we reflect the changes made to our schema to
activity table.

::

    import sqlalchemy as sa
    from alembic import op
    from postgresql_audit import add_column


    def upgrade():
        op.add_column('article', sa.Column('created_at', sa.DateTime()))
        add_column(op, 'article', 'created_at')

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to add the column to
:param column_name:
    Name of the column to add
:param default_value:
    The default value of the column
:param schema:
    Optional name of schema to use.
2.655504
2.659006
0.998683
activity_table = get_activity_table(schema=schema)
remove = sa.cast(column_name, sa.Text)
query = (
    activity_table
    .update()
    .values(
        old_data=activity_table.c.old_data - remove,
        changed_data=activity_table.c.changed_data - remove,
    )
    .where(activity_table.c.table_name == table)
)
return conn.execute(query)
def remove_column(conn, table, column_name, schema=None)
Removes given `activity` jsonb data column key. This function is useful
when you are doing schema changes that require removing a column.

Let's say you've been using PostgreSQL-Audit for a while for a table
called article. Now you want to remove one audited column called
'created_at' from this table.

::

    from alembic import op
    from postgresql_audit import remove_column


    def upgrade():
        op.drop_column('article', 'created_at')
        remove_column(op, 'article', 'created_at')

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to remove the column from
:param column_name:
    Name of the column to remove
:param schema:
    Optional name of schema to use.
3.031321
3.296882
0.919451
activity_table = get_activity_table(schema=schema)
query = (
    activity_table
    .update()
    .values(table_name=new_table_name)
    .where(activity_table.c.table_name == old_table_name)
)
return conn.execute(query)
def rename_table(conn, old_table_name, new_table_name, schema=None)
Renames given table in activity table. You should remember to call this
function whenever you rename a versioned table.

::

    from alembic import op
    from postgresql_audit import rename_table


    def upgrade():
        op.rename_table('article', 'article_v2')
        rename_table(op, 'article', 'article_v2')

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param old_table_name:
    The name of table to rename
:param new_table_name:
    New name of the renamed table
:param schema:
    Optional name of schema to use.
2.596527
2.816405
0.92193
if hasattr(cls, '__versioned__') and cls not in self.pending_classes:
    self.pending_classes.add(cls)
def instrument_versioned_classes(self, mapper, cls)
Collect versioned class and add it to pending_classes list.

:param mapper: SQLAlchemy mapper object
:param cls: SQLAlchemy declarative class
3.449115
3.953717
0.872373
for cls in self.pending_classes:
    self.audit_table(cls.__table__, cls.__versioned__.get('exclude'))
assign_actor(self.base, self.transaction_cls, self.actor_cls)
def configure_versioned_classes(self)
Configures all versioned classes that were collected during the
instrumentation process.
14.041526
16.788757
0.836365
parser = argparse.ArgumentParser(
    description='Get a citation using a PubMed ID or PubMed URL')
parser.add_argument('query', help='PubMed ID or PubMed URL')
parser.add_argument(
    '-m', '--mini', action='store_true', help='get mini citation')
parser.add_argument(
    '-e', '--email', action='store', help='set user email', default='')

args = parser.parse_args(args=args)
lookup = PubMedLookup(args.query, args.email)
publication = Publication(lookup, resolve_doi=False)
if args.mini:
    out.write(publication.cite_mini() + '\n')
else:
    out.write(publication.cite() + '\n')
def pubmed_citation(args=sys.argv[1:], out=sys.stdout)
Get a citation via the command line using a PubMed ID or PubMed URL
2.741443
2.46642
1.111507
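Because both args and out are injectable, the command-line entry point can also be exercised directly from Python, which is handy in tests. The PMID below is a placeholder.

# Hypothetical direct invocation; '12345678' is a placeholder PMID.
import io

buf = io.StringIO()
pubmed_citation(['-m', '12345678'], out=buf)
print(buf.getvalue())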
parser = argparse.ArgumentParser(
    description='Get a publication URL using a PubMed ID or PubMed URL')
parser.add_argument('query', help='PubMed ID or PubMed URL')
parser.add_argument(
    '-d', '--doi', action='store_false', help='get DOI URL')
parser.add_argument(
    '-e', '--email', action='store', help='set user email', default='')

args = parser.parse_args(args=args)
lookup = PubMedLookup(args.query, args.email)
publication = Publication(lookup, resolve_doi=args.doi)
out.write(publication.url + '\n')
def pubmed_url(args=sys.argv[1:], resolve_doi=True, out=sys.stdout)
Get a publication URL via the command line using a PubMed ID or PubMed URL
2.882136
2.459294
1.171937
author_list = self._author_list
if len(author_list) <= max_authors:
    authors_et_al = self.authors
else:
    authors_et_al = ", ".join(
        self._author_list[:max_authors]) + ", et al."
return authors_et_al
def authors_et_al(self, max_authors=5)
Return string with a truncated author list followed by 'et al.'
2.584092
2.315165
1.116159
citation_data = {
    'title': self.title,
    'authors': self.authors_et_al(max_authors),
    'year': self.year,
    'journal': self.journal,
    'volume': self.volume,
    'issue': self.issue,
    'pages': self.pages,
}
citation = "{authors} ({year}). {title} {journal}".format(
    **citation_data)

if self.volume and self.issue and self.pages:
    citation += " {volume}({issue}): {pages}.".format(**citation_data)
elif self.volume and self.issue:
    citation += " {volume}({issue}).".format(**citation_data)
elif self.volume and self.pages:
    citation += " {volume}: {pages}.".format(**citation_data)
elif self.volume:
    citation += " {volume}.".format(**citation_data)
elif self.pages:
    citation += " {pages}.".format(**citation_data)
else:
    citation += "."

return citation
def cite(self, max_authors=5)
Return string with a citation for the record, formatted as: '{authors} ({year}). {title} {journal} {volume}({issue}): {pages}.'
1.709338
1.528612
1.118229
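Given the format string above, a record with all fields set renders roughly like the following line; the author names, title, and journal are invented for illustration.

Smith J, Jones K, et al. (2015). A study of things. J Imag Res 12(3): 45-67.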
citation_data = [self.first_author]

if len(self._author_list) > 1:
    citation_data.append(self.last_author)

citation_data.extend([self.year, self.journal])

return " - ".join(citation_data)
def cite_mini(self)
Return string with a citation for the record, formatted as: '{first_author} - {year} - {journal}'
3.485174
2.737606
1.273074
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation',
            'Article', 'Abstract', 'AbstractText']
abstract_xml = reduce(dict.get, key_path, xml_dict)

abstract_paragraphs = []

if isinstance(abstract_xml, str):
    abstract_paragraphs.append(abstract_xml)

elif isinstance(abstract_xml, dict):
    abstract_text = abstract_xml.get('#text')
    try:
        abstract_label = abstract_xml['@Label']
    except KeyError:
        abstract_paragraphs.append(abstract_text)
    else:
        abstract_paragraphs.append(
            "{}: {}".format(abstract_label, abstract_text))

elif isinstance(abstract_xml, list):
    for abstract_section in abstract_xml:
        try:
            abstract_text = abstract_section['#text']
        except KeyError:
            abstract_text = abstract_section

        try:
            abstract_label = abstract_section['@Label']
        except KeyError:
            abstract_paragraphs.append(abstract_text)
        else:
            abstract_paragraphs.append(
                "{}: {}".format(abstract_label, abstract_text))

else:
    raise RuntimeError("Error parsing abstract.")

return "\n\n".join(abstract_paragraphs)
def parse_abstract(xml_dict)
Parse PubMed XML dictionary to retrieve abstract.
1.913198
1.842784
1.038211
url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/' \
      'efetch.fcgi?db=pubmed&rettype=abstract&id={}' \
      .format(self.pmid)

try:
    response = urlopen(url)
except URLError:
    xml_dict = ''
else:
    xml = response.read().decode()
    xml_dict = xmltodict.parse(xml)

return xml_dict
def get_pubmed_xml(self)
Use a PubMed ID to retrieve PubMed metadata in XML form.
2.291555
2.247386
1.019654
if self.record.get('HasAbstract') == 1 and xml_dict:
    self.abstract = self.parse_abstract(xml_dict)
else:
    self.abstract = ''
def set_abstract(self, xml_dict)
If record has an abstract, extract it from PubMed's XML data
4.65645
3.343079
1.392863
if 'DOI' in self.record:
    doi_url = "/".join(['http://dx.doi.org', self.record['DOI']])

    if resolve_doi:
        try:
            response = urlopen(doi_url)
        except URLError:
            self.url = ''
        else:
            self.url = response.geturl()
    else:
        self.url = doi_url
else:
    self.url = ''
def set_article_url(self, resolve_doi=True)
If record has a DOI, set article URL based on where the DOI points.
2.535191
2.306217
1.099286
key_path = ['PubmedArticleSet', 'PubmedArticle', 'MedlineCitation',
            'Article', 'Journal', 'JournalIssue', 'PubDate']
pubdate_xml = reduce(dict.get, key_path, xml_dict)

if isinstance(pubdate_xml, dict):
    self.year = pubdate_xml.get('Year')
    month_short = pubdate_xml.get('Month')
    self.day = pubdate_xml.get('Day')
    try:
        self.month = datetime.datetime.strptime(
            month_short, "%b").month
    except (ValueError, TypeError):
        self.month = ''
else:
    self.year = ''
    self.month = ''
    self.day = ''
def set_pub_year_month_day(self, xml_dict)
Set publication year, month, day from PubMed's XML data
2.386994
2.19952
1.085234
parse_result = urlparse(pubmed_url)
pattern = re.compile(r'^/pubmed/(\d+)$')
pmid = pattern.match(parse_result.path).group(1)
return pmid
def parse_pubmed_url(pubmed_url)
Get PubMed ID (pmid) from PubMed URL.
3.137106
2.715351
1.155322
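Given the anchored pattern r'^/pubmed/(\d+)$', only the classic /pubmed/<digits> path form matches; anything else will make pattern.match() return None and raise AttributeError on .group(). For example:

# The regex accepts the classic /pubmed/<digits> path form.
print(parse_pubmed_url('http://www.ncbi.nlm.nih.gov/pubmed/22331878'))
# -> '22331878'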
handle = Entrez.esummary(db="pubmed", id=pmid)
record = Entrez.read(handle)
return record
def get_pubmed_record(pmid)
Get PubMed record from PubMed ID.
2.527635
2.414247
1.046966
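Note that Biopython's Entrez module expects callers to identify themselves by setting Entrez.email before running E-utilities queries, presumably the reason the CLI functions above thread an email address through PubMedLookup. A minimal sketch, with a placeholder address:

# Biopython asks callers to set a contact email before E-utilities queries.
from Bio import Entrez

Entrez.email = 'you@example.com'  # placeholder
record = get_pubmed_record('22331878')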
if default_value is None:
    default_value = self.default_value

# set registers to their values
reg_set = self.block.wirevector_subset(Register)
if register_value_map is not None:
    for r in reg_set:
        self.value[r] = self.regvalue[r] = register_value_map.get(r, default_value)

# set constants to their set values
for w in self.block.wirevector_subset(Const):
    self.value[w] = w.val
    assert isinstance(w.val, numbers.Integral)  # for now

# set memories to their passed values
for mem_net in self.block.logic_subset('m@'):
    memid = mem_net.op_param[1].id
    if memid not in self.memvalue:
        self.memvalue[memid] = {}

if memory_value_map is not None:
    for (mem, mem_map) in memory_value_map.items():
        if isinstance(mem, RomBlock):
            raise PyrtlError('error, one or more of the memories in the map is a RomBlock')
        if isinstance(self.block, PostSynthBlock):
            mem = self.block.mem_map[mem]  # pylint: disable=maybe-no-member
        self.memvalue[mem.id] = mem_map

        max_addr_val, max_bit_val = 2**mem.addrwidth, 2**mem.bitwidth
        for (addr, val) in mem_map.items():
            if addr < 0 or addr >= max_addr_val:
                raise PyrtlError('error, address %s in %s outside of bounds' %
                                 (str(addr), mem.name))
            if val < 0 or val >= max_bit_val:
                raise PyrtlError('error, %s at %s in %s outside of bounds' %
                                 (str(val), str(addr), mem.name))

# set all other variables to default value
for w in self.block.wirevector_set:
    if w not in self.value:
        self.value[w] = default_value

self.ordered_nets = tuple((i for i in self.block))
self.reg_update_nets = tuple((self.block.logic_subset('r')))
self.mem_update_nets = tuple((self.block.logic_subset('@')))
def _initialize(self, register_value_map=None, memory_value_map=None, default_value=None)
Sets the wire, register, and memory values to default or as specified.

:param register_value_map: is a map of {Register: value}.
:param memory_value_map: is a map of maps {Memory: {address: Value}}.
:param default_value: is the value that all unspecified registers and
    memories will default to. If no default_value is specified, it will
    use the value stored in the object (default to 0).
3.204626
3.109523
1.030584
# Check that all Input have a corresponding provided_input
input_set = self.block.wirevector_subset(Input)
supplied_inputs = set()
for i in provided_inputs:
    if isinstance(i, WireVector):
        name = i.name
    else:
        name = i
    sim_wire = self.block.wirevector_by_name[name]
    if sim_wire not in input_set:
        raise PyrtlError(
            'step provided a value for input for "%s" which is '
            'not a known input ' % name)
    if not isinstance(provided_inputs[i], numbers.Integral) or provided_inputs[i] < 0:
        raise PyrtlError(
            'step provided an input "%s" which is not a valid '
            'positive integer' % provided_inputs[i])
    if len(bin(provided_inputs[i])) - 2 > sim_wire.bitwidth:
        raise PyrtlError(
            'the bitwidth for "%s" is %d, but the provided input '
            '%d requires %d bits to represent'
            % (name, sim_wire.bitwidth,
               provided_inputs[i], len(bin(provided_inputs[i])) - 2))

    self.value[sim_wire] = provided_inputs[i]
    supplied_inputs.add(sim_wire)

# Check that only inputs are specified, and set the values
if input_set != supplied_inputs:
    for i in input_set.difference(supplied_inputs):
        raise PyrtlError('Input "%s" has no input value specified' % i.name)

self.value.update(self.regvalue)  # apply register updates from previous step
for net in self.ordered_nets:
    self._execute(net)

# Do all of the mem operations based off the new values changed in _execute()
for net in self.mem_update_nets:
    self._mem_update(net)

# at the end of the step, record the values to the trace
# print self.value  # Helpful Debug Print
if self.tracer is not None:
    self.tracer.add_step(self.value)

# Do all of the reg updates based off of the new values
for net in self.reg_update_nets:
    argval = self.value[net.args[0]]
    self.regvalue[net.dests[0]] = self._sanitize(argval, net.dests[0])

# finally, if any of the rtl_assert assertions are failing then we should
# raise the appropriate exceptions
check_rtl_assertions(self)
def step(self, provided_inputs)
Take the simulation forward one cycle.

:param provided_inputs: a dictionary mapping wirevectors to their values
    for this step

All input wires must be in the provided_inputs in order for the
simulation to accept these values.

Example: if we have inputs named 'a' and 'x', we can call
sim.step({'a': 1, 'x': 23}) to simulate a cycle with values 1 and 23
respectively.
4.243398
4.114773
1.031259
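Putting the docstring's example in context, a complete round trip through PyRTL's simulator looks roughly like the sketch below. The wire widths and values are illustrative choices, not taken from this codebase.

# Illustrative end-to-end sketch of Simulation.step(); values are arbitrary.
import pyrtl

a, x = pyrtl.Input(8, 'a'), pyrtl.Input(8, 'x')
total = pyrtl.Output(9, 'total')
total <<= a + x  # 8-bit + 8-bit addition produces a 9-bit result

trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=trace)
sim.step({'a': 1, 'x': 23})
assert sim.inspect('total') == 24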
wire = self.block.wirevector_by_name.get(w, w)
return self.value[wire]
def inspect(self, w)
Get the value of a wirevector in the last simulation cycle.

:param w: the name of the WireVector to inspect (passing in a WireVector
    instead of a name is deprecated)
:return: value of w in the current step of simulation

Will throw KeyError if w does not exist in the simulation.
11.733273
9.141994
1.283448
if net.op in 'r@':
    return  # registers and memory write ports have no logic function
elif net.op in self.simple_func:
    argvals = (self.value[arg] for arg in net.args)
    result = self.simple_func[net.op](*argvals)
elif net.op == 'c':
    result = 0
    for arg in net.args:
        result = result << len(arg)
        result = result | self.value[arg]
elif net.op == 's':
    result = 0
    source = self.value[net.args[0]]
    for b in net.op_param[::-1]:
        result = (result << 1) | (0x1 & (source >> b))
elif net.op == 'm':
    # memories act async for reads
    memid = net.op_param[0]
    mem = net.op_param[1]
    read_addr = self.value[net.args[0]]
    if isinstance(mem, RomBlock):
        result = mem._get_read_data(read_addr)
    else:
        result = self.memvalue[memid].get(read_addr, self.default_value)
else:
    raise PyrtlInternalError('error, unknown op type')

self.value[net.dests[0]] = self._sanitize(result, net.dests[0])
def _execute(self, net)
Handle the combinational logic update rules for the given net.

This function, along with edge_update, defines the semantics of the
primitive ops. Function updates self.value accordingly.
4.146855
4.077745
1.016948
if net.op != '@':
    raise PyrtlInternalError
memid = net.op_param[0]
write_addr = self.value[net.args[0]]
write_val = self.value[net.args[1]]
write_enable = self.value[net.args[2]]
if write_enable:
    self.memvalue[memid][write_addr] = write_val
def _mem_update(self, net)
Handle the mem update for the simulation of the given net (which is a memory). Combinational logic should have no posedge behavior, but registers and memory should. This function, used after _execute, defines the semantics of the primitive ops. Function updates self.memvalue accordingly (using prior_value)
4.341629
3.79548
1.143894
# validate_inputs
for wire, value in provided_inputs.items():
    wire = self.block.get_wirevector_by_name(wire) if isinstance(wire, str) else wire
    if value > wire.bitmask or value < 0:
        raise PyrtlError("Wire {} has value {} which cannot be represented"
                         " using its bitwidth".format(wire, value))

# building the simulation data
ins = {self._to_name(wire): value for wire, value in provided_inputs.items()}
ins.update(self.regs)
ins.update(self.mems)

# propagate through logic
self.regs, self.outs, mem_writes = self.sim_func(ins)
for mem, addr, value in mem_writes:
    self.mems[mem][addr] = value

# for tracer compatibility
self.context = self.outs.copy()
self.context.update(ins)  # also gets old register values

if self.tracer is not None:
    self.tracer.add_fast_step(self)

# check the rtl assertions
check_rtl_assertions(self)
def step(self, provided_inputs)
Run the simulation for a cycle.

:param provided_inputs: a dictionary mapping WireVectors (or their names)
    to their values for this step, e.g. {wire: 3, "wire_name": 17}
5.546304
4.923812
1.126425
try:
    return self.context[self._to_name(w)]
except AttributeError:
    raise PyrtlError("No context available. Please run a simulation step in "
                     "order to populate values for wires")
def inspect(self, w)
Get the value of a wirevector in the last simulation cycle.

:param w: the name of the WireVector to inspect (passing in a WireVector
    instead of a name is deprecated)
:return: value of w in the current step of simulation

Will throw KeyError if w is not being tracked in the simulation.
12.603105
10.041574
1.255093
if isinstance(mem, RomBlock):
    raise PyrtlError("ROM blocks are not stored in the simulation object")
return self.mems[self._mem_varname(mem)]
def inspect_mem(self, mem)
Get the values in a map during the current simulation cycle.

:param mem: the memory to inspect
:return: {address: value}

Note that this returns the current memory state. Modifying the dictionary
will also modify the state in the simulator.
13.26071
17.389627
0.762564
if isinstance(wire, (Input, Register)):
    return 'd[' + repr(wire.name) + ']'  # passed in
elif isinstance(wire, Const):
    return str(wire.val)  # hardcoded
else:
    return self._varname(wire)
def _arg_varname(self, wire)
Input, Const, and Registers have special input values
5.869991
4.549496
1.290251
sl = symbol_len - 1
if len(w) > 1:
    out = self._revstart
    if current_val != self.prior_val:
        out += self._x + hex(current_val).rstrip('L').ljust(sl)[:sl]
    elif n == 0:
        out += hex(current_val).rstrip('L').ljust(symbol_len)[:symbol_len]
    else:
        out += ' ' * symbol_len
    out += self._revstop
else:
    pretty_map = {
        (0, 0): self._low + self._low * sl,
        (0, 1): self._up + self._high * sl,
        (1, 0): self._down + self._low * sl,
        (1, 1): self._high + self._high * sl,
    }
    out = pretty_map[(self.prior_val, current_val)]
return out
def _render_val_with_prev(self, w, n, current_val, symbol_len)
Return a string encoding the given value in a waveform.

:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: an integer for how big to draw the current value

Returns a string of printed length symbol_len that will draw the
representation of current_val. The input prior_val is used to render
transitions.
3.380911
3.314058
1.020172
if len(self.trace) == 0:
    raise PyrtlError('error, simulation trace needs at least 1 signal to track '
                     '(by default, unnamed signals are not traced -- try either passing '
                     'a name to a WireVector or setting a "wirevector_subset" option)')
for wire in self.trace:
    tracelist = self.trace[wire]
    wirevec = self._wires[wire]
    tracelist.append(value_map[wirevec])
def add_step(self, value_map)
Add the values in value_map to the end of the trace.
8.912223
8.366392
1.065241
for wire_name in self.trace:
    self.trace[wire_name].append(fastsim.context[wire_name])
def add_fast_step(self, fastsim)
Add the fastsim context to the trace.
7.438646
5.021515
1.481355
if len(self.trace) == 0:
    raise PyrtlError('error, cannot print an empty trace')
if base not in (2, 8, 10, 16):
    raise PyrtlError('please choose a valid base (2,8,10,16)')

basekey = {2: 'b', 8: 'o', 10: 'd', 16: 'x'}[base]
ident_len = max(len(w) for w in self.trace)

if compact:
    for w in sorted(self.trace, key=_trace_sort_key):
        vals = ''.join('{0:{1}}'.format(x, basekey) for x in self.trace[w])
        file.write(w.rjust(ident_len) + ' ' + vals + '\n')
else:
    maxlenval = max(len('{0:{1}}'.format(x, basekey))
                    for w in self.trace for x in self.trace[w])
    file.write(' ' * (ident_len - 3) + "--- Values in base %d ---\n" % base)
    for w in sorted(self.trace, key=_trace_sort_key):
        vals = ' '.join('{0:>{1}{2}}'.format(x, maxlenval, basekey)
                        for x in self.trace[w])
        file.write(w.ljust(ident_len + 1) + vals + '\n')

file.flush()
def print_trace(self, file=sys.stdout, base=10, compact=False)
Prints a list of wires and their current values.

:param int base: the base the values are to be printed in
:param bool compact: whether to omit spaces in output lines
2.470757
2.483964
0.994683
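Continuing the three-wire sketch shown earlier, the non-compact rendering after a single step would look roughly like the following; exact alignment depends on the traced names and maximum value width.

  --- Values in base 10 ---
a       1
total  24
x      23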