content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def parse_test_config(doc):
    """ Get the configuration element. """
    test_config = doc.documentElement
    if test_config.tagName != 'configuration':
        raise RuntimeError('expected configuration tag at root')
    return test_config
5,344,800
def _get_control_vars(control_vars):
    """
    Create the section of control variables

    Parameters
    ----------
    control_vars: str
        Functions to define control variables.

    Returns
    -------
    text: str
        Control variables section and header of model variables section.

    """
    text = textwrap.dedent("""
    ##########################################################################
    #                            CONTROL VARIABLES                           #
    ##########################################################################

    def _init_outer_references(data):
        for key in data:
            __data[key] = data[key]

    def time():
        return __data['time']()

    """)
    text += control_vars

    text += textwrap.dedent("""
    ##########################################################################
    #                             MODEL VARIABLES                            #
    ##########################################################################

    """)
    return text
5,344,801
def echo():
    """Echo data"""
    # request.get_data() returns bytes under Python 3, so append a bytes newline.
    return request.get_data() + b'\n'
5,344,802
def get_ids():
    """ Get all SALAMI IDs related to RWC """
    # Filename for SALAMI RWC metadata
    metadata_file = os.path.join(
        dpath.SALAMI, 'metadata', 'id_index_rwc.csv')

    ids = []
    with open(metadata_file, "r") as rwc_file:
        reader = csv.reader(rwc_file)
        next(reader)  # skip header
        for row in reader:
            ids.append(int(row[0]))

    ids = ids[1:]  # First one has no annotations!?
    return ids
5,344,803
def _get_signature_def(signature_def_key, export_dir, tags):
    """Construct a `SignatureDef` proto."""
    signature_def_key = (
        signature_def_key or
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    metagraph_def = saved_model_cli.get_meta_graph_def(export_dir, tags)

    try:
        signature_def = signature_def_utils.get_signature_def_by_key(
            metagraph_def,
            signature_def_key)
    except ValueError as e:
        try:
            formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
                signature_def_key)
            signature_def = signature_def_utils.get_signature_def_by_key(
                metagraph_def, formatted_key)

            logging.warning('Could not find signature def "%s". '
                            'Using "%s" instead', signature_def_key, formatted_key)
        except ValueError:
            raise ValueError(
                'Got signature_def_key "{}". Available signatures are {}. '
                'Original error:\n{}'.format(
                    signature_def_key, list(metagraph_def.signature_def), e))
    return signature_def
5,344,804
def illuminance_to_exposure_value(E, S, c=250): """ Computes the exposure value :math:`EV` from given scene illuminance :math:`E` in :math:`Lux`, *ISO* arithmetic speed :math:`S` and *incident light calibration constant* :math:`c`. Parameters ---------- E : array_like Scene illuminance :math:`E` in :math:`Lux`. S : array_like *ISO* arithmetic speed :math:`S`. c : numeric, optional *Incident light calibration constant* :math:`c`. With a flat receptor, *ISO 2720:1974* recommends a range for :math:`c`. of 240 to 400 with illuminance in :math:`Lux`; a value of 250 is commonly used. With a hemispherical receptor, *ISO 2720:1974* recommends a range for :math:`c` of 320 to 540 with illuminance in :math:`Lux`; in practice, values typically are between 320 (Minolta) and 340 (Sekonic). Returns ------- ndarray Exposure value :math:`EV`. Notes ----- - The exposure value :math:`EV` indicates a combination of camera settings rather than the focal plane exposure, i.e. luminous exposure, photometric exposure, :math:`H`. The focal plane exposure is time-integrated illuminance. References ---------- :cite:`Wikipediabj` Examples -------- >>> illuminance_to_exposure_value(2.5, 100) 0.0 """ E = as_float_array(E) S = as_float_array(S) c = as_float_array(c) EV = np.log2(E * S / c) return EV
5,344,805
def feedback(code, guess):
    """
    Return a namedtuple Feedback(blacks, whites) where blacks is the number of
    pegs from the guess that are correct in both color and position and whites
    is the number of pegs of the right color but wrong position.
    """
    blacks = sum(g == c for g, c in zip(guess, code))
    whites = sum((Counter(guess) & Counter(code)).values()) - blacks
    return Feedback(blacks, whites)
5,344,806
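A minimal usage sketch for the `feedback` function above (illustration only, not part of the dataset row; the code/guess values and the local Feedback namedtuple are made up, since the original module presumably defines its own):

from collections import Counter, namedtuple

Feedback = namedtuple('Feedback', ['blacks', 'whites'])  # stand-in for the module's own type

code = ('red', 'blue', 'green', 'red')
guess = ('red', 'green', 'blue', 'yellow')
print(feedback(code, guess))  # Feedback(blacks=1, whites=2)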
def print_config(conf):
    """ Print debug information for a halogen.config.ConfigBase subclass """
    width: int = terminal_size().width

    print("=" * width)
    print("")
    print(f" • CONFIG: {conf.name}")
    print(f" • PREFIX: {conf.prefix}")
    print("")
    print("-" * width)
    print(" • INCLUDES:")
    print("")
    print(conf.get_includes())
    print("")
    print("-" * width)
    print(" • LIBS:")
    print("")
    print(conf.get_libs())
    print("")
    print("-" * width)
    print(" • CFLAGS:")
    print("")
    print(conf.get_cflags())
    print("")
    print("-" * width)
    print(" • LDFLAGS:")
    print("")
    print(conf.get_ldflags())
    print("")
    print("-" * width)
    print(" » stringification:")
    print("")
    print(str(conf))
    print("")
    # print("-" * width)
5,344,807
def Logging(logfile=None):
    """Custom logging function.

    Args:
        logfile: The name of log files. Log will be stored in logs_dir.

    Returns:
        The same output of the call function with logging information.
    """
    # Create logs_dir if the directory logs does not exist.
    logs_dir = 'logs'
    if os.path.isdir(logs_dir) is False:
        os.mkdir(logs_dir)

    def Logging_decorator(func):
        # Define logger, set the logger name as func.__name__
        logger = logging.getLogger(func.__name__)  # run logger.name to check
        # Set level for logger
        logger.setLevel(logging.DEBUG)

        # Define the handler and formatter for console logging
        consoleHandler = logging.StreamHandler()       # Define StreamHandler
        consoleHandler.setLevel(logging.DEBUG)         # Set level
        concolsFormatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')  # Define formatter
        consoleHandler.setFormatter(concolsFormatter)  # Set formatter
        logger.addHandler(consoleHandler)              # Add handler to logger

        # Define the handler and formatter for file logging
        if logfile is not None:
            fileHandler = logging.FileHandler(f'{logs_dir}/{logfile}.log')  # Define FileHandler
            fileHandler.setLevel(logging.WARNING)      # Set level
            fileFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')  # Define formatter
            fileHandler.setFormatter(fileFormatter)    # Set formatter
            logger.addHandler(fileHandler)             # Add handler to logger

        @functools.wraps(func)
        def wrapper_decorator(*args, **kwargs):
            # Before running func
            # logger.debug(f"{func.__name__} - {args} - {kwargs}")
            logger.debug(f"{func.__name__}({args}, {kwargs})")
            try:
                output = func(*args, **kwargs)
            except Exception:
                logger.exception(f"{func.__name__}({args}, {kwargs})")
                # Re-raise so callers see the failure; otherwise `output` below
                # would be referenced before assignment.
                raise
            # After running func
            return output
        return wrapper_decorator
    return Logging_decorator
5,344,808
def find_winning_dates(placed_bets, winning_date):
    """
    Finds the placed bets with the dates closest to the winning date
    :param placed_bets: iterable of PlacedDateBet
    :param winning_date: datetime.date
    :return: list of winning PlacedDateBets
    """
    from datetime import date
    from .models import PlacedDateBet

    assert isinstance(winning_date, date)

    dates = []
    for placed_bet in placed_bets:
        assert isinstance(placed_bet, PlacedDateBet)
        dates.append(placed_bet.placed_date)

    timedeltas = []
    for date in dates:
        timedeltas.append(abs(winning_date - date))

    closest = min(timedeltas)

    indices = []
    for i in range(0, len(timedeltas)):
        if timedeltas[i] == closest:
            indices.append(i)

    winning_bets = []
    for index in indices:
        winning_bets.append(placed_bets[index])

    return winning_bets
5,344,809
def create_greedy_policy(Q):
    """
    Creates a greedy policy based on Q values.

    Args:
        Q: A dictionary that maps from state -> action values

    Returns:
        A function that takes an observation as input and returns a vector
        of action probabilities.
    """
    def policy_fn(observation):
        a_probs = np.zeros(len(Q[observation]))
        a_probs[np.argmax(Q[observation])] = 1.0
        return a_probs
    return policy_fn
5,344,810
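A small illustrative call of `create_greedy_policy` above (the two-state Q table is hypothetical and not part of the dataset row):

import numpy as np

Q = {0: np.array([0.1, 0.7, 0.2]), 1: np.array([0.9, 0.05, 0.05])}
policy = create_greedy_policy(Q)
print(policy(0))  # [0. 1. 0.]  greedy action is index 1
print(policy(1))  # [1. 0. 0.]  greedy action is index 0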
def delay_data_inputflag_future(delay_data_inputflag_future_main):
    """Make function level future shape delay uvcal object."""
    delay_object = delay_data_inputflag_future_main.copy()

    yield delay_object

    del delay_object
5,344,811
def get_tags(rule, method, **options):
    """
    gets the valid tags for given rule.

    :param pyrin.api.router.handlers.base.RouteBase rule: rule instance to be processed.
    :param str method: http method name.

    :rtype: list[str]
    """
    return get_component(SwaggerPackage.COMPONENT_NAME).get_tags(rule, method, **options)
5,344,812
def register_engines():
    """Register engines."""
    for filename in os.listdir(os.path.join(os.path.dirname(__file__))):
        if not os.path.isfile(os.path.join(os.path.dirname(__file__), filename)):
            continue
        if filename == '__init__.py':
            continue
        try:
            importlib.import_module(
                os.path.join('pydtk.db.v3.search_engines',
                             os.path.splitext(filename)[0]).replace(os.sep, '.')
            )
        except ModuleNotFoundError:
            logging.debug('Failed to load handlers in {}'.format(filename))
5,344,813
def import_module_set_env(import_dict):
    """
    https://stackoverflow.com/questions/1051254/check-if-python-package-is-installed

    Safely imports a module or package and sets an environment variable if it
    imports (or is already imported). This is used in the main function for
    checking whether or not `cupy` is installed. If it is not installed, then
    options for cuda-enabled functions will be greyed out.
    """
    for key in import_dict:
        if key in sys.modules:
            os.environ[import_dict[key]] = "True"
            pass
        elif (spec := importlib.util.find_spec(key)) is not None:
            module = importlib.util.module_from_spec(spec)
            sys.modules[key] = module
            spec.loader.exec_module(module)
            os.environ[import_dict[key]] = "True"
        else:
            os.environ[import_dict[key]] = "False"
            pass
5,344,814
async def test_config_no_update(ismartgateapi_mock, hass: HomeAssistant) -> None:
    """Test config setup where the data is not updated."""
    api = MagicMock(GogoGate2Api)
    api.async_info.side_effect = Exception("Error")
    ismartgateapi_mock.return_value = api

    config_entry = MockConfigEntry(
        domain=DOMAIN,
        source=SOURCE_USER,
        data={
            CONF_DEVICE: DEVICE_TYPE_ISMARTGATE,
            CONF_IP_ADDRESS: "127.0.0.1",
            CONF_USERNAME: "admin",
            CONF_PASSWORD: "password",
        },
    )
    config_entry.add_to_hass(hass)

    assert not await hass.config_entries.async_setup(entry_id=config_entry.entry_id)
    await hass.async_block_till_done()

    assert config_entry.data == {
        CONF_DEVICE: DEVICE_TYPE_ISMARTGATE,
        CONF_IP_ADDRESS: "127.0.0.1",
        CONF_USERNAME: "admin",
        CONF_PASSWORD: "password",
    }
5,344,815
def get_paybc_transaction_request():
    """Return a stub payment transaction request."""
    return {
        'clientSystemUrl': 'http://localhost:8080/abcd',
        'payReturnUrl': 'http://localhost:8081/xyz'
    }
5,344,816
def hid_exit():
    """Clean up hidapi library resources.

    Arguments:
        None

    Returns:
        None"""
    global __hidapi
    assert __hidapi is not None

    if __hidapi.hid_exit() != 0:
        raise RuntimeError('hid_exit() failed.')
5,344,817
def dropout_forward(x, dropout_param): """ Performs the forward pass for (inverted) dropout. Inputs: - x: Input data, of any shape - dropout_param: A dictionary with the following keys: - p: Dropout parameter. We drop each neuron output with probability p. - mode: 'test' or 'train'. If the mode is train, then perform dropout; if the mode is test, then just return the input. - seed: Seed for the random number generator. Passing seed makes this function deterministic, which is needed for gradient checking but not in real networks. Outputs: - out: Array of the same shape as x. - cache: A tuple (dropout_param, mask). In training mode, mask is the dropout mask that was used to multiply the input; in test mode, mask is None. """ p, mode = dropout_param['p'], dropout_param['mode'] if 'seed' in dropout_param: np.random.seed(dropout_param['seed']) mask = None out = None if mode == 'train': ########################################################################### # TODO: Implement the training phase forward pass for inverted dropout. # # Store the dropout mask in the mask variable. # ########################################################################### mask = (np.random.rand(*x.shape)<(1-p))/(1-p) out = x*mask ########################################################################### # END OF YOUR CODE # ########################################################################### elif mode == 'test': ########################################################################### # TODO: Implement the test phase forward pass for inverted dropout. # ########################################################################### out = x ########################################################################### # END OF YOUR CODE # ########################################################################### cache = (dropout_param, mask) out = out.astype(x.dtype, copy=False) return out, cache
5,344,818
def cholesky(a: Union[numpy.ndarray, pandas.core.frame.DataFrame, List[List[float]]]):
    """
    usage.scipy: 5
    usage.statsmodels: 48
    """
    ...
5,344,819
def pretreatment(filename): """pretreatment""" poems = [] file = open(filename, "r") for line in file: #every line is a poem #print(line) title, poem = line.strip().split(":") #get title and poem poem = poem.replace(' ','') if '_' in poem or '《' in poem or '[' in poem or '(' in poem or '(' in poem: continue if len(poem) < 10 or len(poem) > 128: #filter poem continue poem = '[' + poem + ']' #add start and end signs poems.append(poem) print("唐诗总数: %d"%len(poems)) #counting words allWords = {} for poem in poems: for word in poem: if word not in allWords: allWords[word] = 1 else: allWords[word] += 1 #''' # erase words which are not common erase = [] for key in allWords: if allWords[key] < 2: erase.append(key) for key in erase: del allWords[key] #''' wordPairs = sorted(allWords.items(), key = lambda x: -x[1]) words, a= zip(*wordPairs) #print(words) words += (" ", ) wordToID = dict(zip(words, range(len(words)))) #word to ID wordTOIDFun = lambda A: wordToID.get(A, len(words)) poemsVector = [([wordTOIDFun(word) for word in poem]) for poem in poems] # poem to vector #print(poemsVector) #padding length to batchMaxLength batchNum = (len(poemsVector) - 1) // batchSize X = [] Y = [] #create batch for i in range(batchNum): batch = poemsVector[i * batchSize: (i + 1) * batchSize] maxLength = max([len(vector) for vector in batch]) temp = np.full((batchSize, maxLength), wordTOIDFun(" "), np.int32) for j in range(batchSize): temp[j, :len(batch[j])] = batch[j] X.append(temp) temp2 = np.copy(temp) #copy!!!!!! temp2[:, :-1] = temp[:, 1:] Y.append(temp2) return X, Y, len(words) + 1, wordToID, words
5,344,820
def forward_fdm(order, deriv, adapt=1, **kw_args):
    """Construct a forward finite difference method.

    Further takes in keyword arguments of the constructor of :class:`.fdm.FDM`.

    Args:
        order (int): Order of the method.
        deriv (int): Order of the derivative to estimate.
        adapt (int, optional): Number of recursive calls to higher-order
            derivatives to dynamically determine the step size. Defaults to `1`.

    Returns:
        :class:`.fdm.FDM`: The desired finite difference method.
    """
    return FDM(
        list(range(order)),
        deriv,
        bound_estimator=_construct_bound_estimator(
            forward_fdm, order, adapt, **kw_args
        ),
        **kw_args
    )
5,344,821
def lrp_linear_torch(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor=0.0, debug=False): """ LRP for a linear layer with input dim D and output dim M. Args: - hin: forward pass input, of shape (D,) - w: connection weights, of shape (D, M) - b: biases, of shape (M,) - hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!) - Rout: relevance at layer output, of shape (M,) - bias_nb_units: total number of connected lower-layer units (onto which the bias/stabilizer contribution is redistributed for sanity check) - eps: stabilizer (small positive number) - bias_factor: set to 1.0 to check global relevance conservation, otherwise use 0.0 to ignore bias/stabilizer redistribution (recommended) Returns: - Rin: relevance at layer input, of shape (D,) """ sign_out = torch.where(hout.cpu() >= 0 , torch.Tensor([1.]), torch.Tensor([-1.])).view(1,-1) # shape (1, M) numer = (w * hin.view(-1,1)).cpu() + ( bias_factor * (b.view(1,-1)*1. + eps*sign_out*1.) / bias_nb_units ) # shape (D, M) # Note: here we multiply the bias_factor with both the bias b and the stabilizer eps since in fact # using the term (b[na,:]*1. + eps*sign_out*1.) / bias_nb_units in the numerator is only useful for sanity check # (in the initial paper version we were using (bias_factor*b[na,:]*1. + eps*sign_out*1.) / bias_nb_units instead) denom = hout.view(1,-1) + (eps*sign_out*1.) # shape (1, M) message = (numer/denom) * Rout.view(1,-1) # shape (D, M) Rin = message.sum(axis=1) # shape (D,) if debug: print("local diff: ", Rout.sum() - Rin.sum()) # Note: # - local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D (i.e. when only one incoming layer) # - global network relevance conservation if bias_factor==1.0 and bias_nb_units set accordingly to the total number of lower-layer connections # -> can be used for sanity check return Rin
5,344,822
def create_resource_group(resource_client, resource_group, location):
    # type: (azure.mgmt.resource.resources.ResourceManagementClient,
    #        str, str) -> None
    """Create a resource group if it doesn't exist
    :param azure.mgmt.resource.resources.ResourceManagementClient
        resource_client: resource client
    :param str resource_group: resource group name
    :param str location: location
    """
    # check if resource group exists
    exists = resource_client.resource_groups.check_existence(resource_group)

    # create resource group if it doesn't exist
    if not exists:
        logger.info('creating resource group: {}'.format(resource_group))
        resource_client.resource_groups.create_or_update(
            resource_group_name=resource_group,
            parameters=rgmodels.ResourceGroup(
                location=location,
            )
        )
    else:
        logger.debug('resource group {} exists'.format(resource_group))
5,344,823
def get_list_by_ingredient(ingredient):
    """ this should return data for filtered recipes by ingredient """
    res = requests.get(f'{API_URL}/{API_KEY}/filter.php', params={"i": ingredient})
    return res.json()
5,344,824
def test_get_account_balance_invalid_node_url():
    """
    Case: get a balance of an account by passing an invalid node URL.
    Expect: the following node URL is invalid error message.
    """
    invalid_node_url = 'domainwithoutextention'

    runner = CliRunner()
    result = runner.invoke(cli, [
        'account',
        'get-balance',
        '--address',
        '1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf',
        '--node-url',
        invalid_node_url,
    ])

    expected_error = {
        'errors': {
            'node_url': [
                f'The following node URL `{invalid_node_url}` is invalid.',
            ],
        },
    }

    assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
    assert dict_to_pretty_json(expected_error) in result.output
5,344,825
def platform_config_update(config):
    """
    Update configuration for the remote platform

    @param config The configuration dictionary to use/update
    """
    remote_port_map = build_ifaces_map(get_ifaces())
    config["port_map"] = remote_port_map.copy()
    config["caps_table_idx"] = 0
5,344,826
def chk_sudo():
    """\
    Type: decorator.

    The command will only be able to be executed by the author
    if the author is an owner or has permissions.
    """
    async def predicate(ctx):
        if is_sudoers(ctx.author):
            return True
        await ctx.message.add_reaction("🛑")
        raise excepts.NotMod()
    return commands.check(predicate)
5,344,827
def have_same_items(list1, list2):
    """
    Checks whether list1 and list2 consist of the same number of the same elements

    Parameters
    ----------
    list1 : list[int]
        array of unique elements, sorted in ascending order
    list2 : list[int]
        array of arbitrary length containing arbitrary numbers

    Returns
    -------
    bool
    """
    # Minimal implementation of the documented contract (the original body was a
    # bare `return True` stub): list2 must contain exactly the same elements as list1.
    return sorted(list2) == list1
5,344,828
def main( consumer_key: str, consumer_secret: str, access_token: str, access_token_secret: str, bearer_token: str, hashtags: frozenset[str] = frozenset( [ "binit", "binthis", "bintweet", "junktweet", "rmit", "rmthis", "rmme", "rmtweet", ] ), ) -> None: """[Remove (bin) tweet after specified time from certain hashtag. Any tweets with specified hashtag beyond 5 days will be removed.] Args: consumer_key (str): Twitter API (consumer) key. Get from https://developer.twitter.com/en/portal/projects. consumer_secret (str): Twitter API (consumer) secret key. Get from https://developer.twitter.com/en/portal/projects. access_token (str): Twitter access token key. Get from https://developer.twitter.com/en/portal/projects. access_token_secret (str): Twitter access token secret key. Get from https://developer.twitter.com/en/portal/projects. bearer_token (str): Twitter bearer token. Get from https://developer.twitter.com/en/portal/projects. hashtags (frozenset[str]): A set of distinct hashtag that will be used to indicate which tweets to be removed (bin). """ utcnow = datetime.now(timezone.utc) logger.info(f"bintweet starting...") logger.info(f"authenticating...") auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) me = api.me() logger.info(f"authentication is successful.") logger.bind(sensitive=True).info( "screen name: {screen_name}", screen_name=me.screen_name ) regex = re.compile( fr"#after(?P<{NUMBER}>\d)(?P<{UNIT}>[{UNIT_DAYS}{UNIT_HOURS}{UNIT_MINUTES}{UNIT_SECONDS}])" ) for hashtag in hashtags: logger.info(f"hashtag: {hashtag}") query = f"query={hashtag} (from:{me.screen_name})" tweet_fields = "tweet.fields=text,created_at" max_results = "max_results=100" url = f"https://api.twitter.com/2/tweets/search/recent?{query}&{tweet_fields}&{max_results}" headers = {"Authorization": "Bearer {}".format(bearer_token)} response = requests.request("GET", url, headers=headers).json() for tweet in response.get("data") or []: result = regex.search(tweet["text"]) logger.debug(tweet["id"]) if result: number = int(result.groupdict()[NUMBER]) unit = result.groupdict()[UNIT] if unit == UNIT_HOURS: if number > NUMBER_MAX_HOURS: number = NUMBER_MAX_HOURS removeDateTime = iso8601.parse_date( tweet["created_at"] ) + timedelta(hours=number) if utcnow > removeDateTime: logger.info(f"remove tweet {tweet['id']}") api.destroy_status(tweet["id"]) continue elif unit == UNIT_DAYS: if number > NUMBER_MAX_DAYS: number = NUMBER_MAX_DAYS removeDateTime = iso8601.parse_date( tweet["created_at"] ) + timedelta(days=number) if utcnow > removeDateTime: logger.info(f"remove tweet {tweet['id']}") api.destroy_status(tweet["id"]) continue elif unit == UNIT_MINUTES: if number > NUMBER_MAX_MINUTES: number = NUMBER_MAX_MINUTES removeDateTime = iso8601.parse_date( tweet["created_at"] ) + timedelta(minutes=number) if utcnow > removeDateTime: logger.info(f"remove tweet {tweet['id']}") api.destroy_status(tweet["id"]) continue elif unit == UNIT_SECONDS: if number > NUMBER_MAX_SECONDS: number = NUMBER_MAX_SECONDS removeDateTime = iso8601.parse_date( tweet["created_at"] ) + timedelta(seconds=number) if utcnow > removeDateTime: logger.info(f"remove tweet {tweet['id']}") api.destroy_status(tweet["id"]) continue # TODO check if tweets is older than 5 days removeDateTime = iso8601.parse_date( tweet["created_at"] ) + timedelta(days=NUMBER_MAX_DAYS) if utcnow > removeDateTime: logger.info(f"remove tweet {tweet['id']}") api.destroy_status(tweet["id"])
5,344,829
def analyze(geometry_filenames, mode='global', training_info=None, stride=None, box_size=None, configs=None, descriptor=None, model=None, format_=None, descriptors=None, save_descriptors=False, save_path_descriptors=None, nb_jobs=-1, **kwargs): """ Apply ARISE to given list of geometry files. This function is key to reproduce the single- and polycrystalline predictions in: [1] A. Leitherer, A. Ziletti, and L.M. Ghiringhelli, arXiv ??? (2021). Parameters: gometry_filenames: list list of geometry files to be analyzed. mode: str (default='global') If 'global', a global descriptor will be calculated and a global label (plus uncertainty) predicted. If 'local', the strided pattern matching algorithm introduced in [1] is applied. stride: float (default=None) Step size in strided pattern matching algorithm. Only relevant if mode='local'. If no value is specified, a stride of 4 Angstroem in each direction, for each of the geometry files is used. box_size: float (default=None) Size of the box employed in strided pattern matching algorithm. Only relevant if mode='local'. If no value is specified, a box size of 16 Angstroem is used, for each of the geometry files. configs: object (default=None) configuration object, defining folder structure. For more details, please have a look at the function set_configs from ai4materials.utils.utils_config descriptor: ai4materials descriptor object (default=None) If None, the SOAP descriptor as implemented in the quippy package (see ai4materials.descritpors.quippy_soap_descriptor) with the standard settings employed in [1] will be used. model: str, (default=None) If None, the model of [1] will be automatically loaded. Otherwise the path to the model h5 file needs to be specified alongside information on the training set (in particular, the relation between integer class labels and class labels). training_info: path to dict (default=None) Information on the realtion between int labels and structure labels. If model=None, training information of [1] will be loaded regardless of this keyword. If model not None, then specification of training_info is mandatory. The structure of this dictionary is defined as dict = {'data': ['nb_classes': 108, 'classes': [text label class 0, text label class 1, ... ie ordered class labels]]} format_: str, optional (default=None) format of geometry files. If not specified, the input files are assumed to have aims format in case of global mode, and xyz format in case of local mode. descriptors: path to desc or numpy array, optional (default=None) If mode=local, then this must be a path to a desc file containing the descriptors. If mode=global, then this must be a numpy array containing the descriptors. save_descriptors: bool, optional (default=False) Decides whether to save calculated descriptors into specified savepath or not (only for mode=local). 
save_path_descriptors: str, optional (default=None) path into which descriptors are saved (for mode=global) """ if not model == None: if training_info == None: raise ValueError("No information on the relation between int and str class labels is provided.") #if not (type(model) == str or type(model)==keras.engine.training.Model): # raise NotImplementedError("Either specifiy path or model loaded from h5 via keras.models.load_model") if stride == None: stride = [[4.0, 4.0, 4.0] for _ in range(len(geometry_filenames))] if box_size == None: box_size = [16.0 for _ in range(len(geometry_filenames))] if format_ == None: if mode == 'global': format_ = 'aims' elif mode == 'local': format_ = 'xyz' if not model == None: try: model_file_ending = model.split('.')[1] if not model_file_ending == '.h5': raise NotImplementedError("Model path must link to h5 file.") except: raise ValueError("Model must be a path to a h5 file or None. In the latter case, a pretrained model is loaded.") if mode == 'global': predictions, uncertainty = global_(geometry_filenames, descriptor=descriptor, model=model, format_=format_, descriptors=descriptors, save_descriptors=save_descriptors, save_path_descriptors=save_path_descriptors, **kwargs) elif mode == 'local': predictions, uncertainty = local(geometry_filenames, stride, box_size, configs, descriptor=descriptor, model=model, format_=format_, desc_filename=descriptors, nb_jobs=nb_jobs, **kwargs) else: raise ValueError("Argument 'mode' must either be 'local' or 'global'.") return predictions, uncertainty
5,344,830
def get_params(name, seed):
    """Some default parameters. Note that this will initially include training
    parameters that you won't need for metalearning since we have our own
    training loop."""
    configs = []
    overrides = {}
    overrides["dataset_reader"] = {"lazy": True}
    configs.append(Params(overrides))
    configs.append(
        Params({"random_seed": seed, "numpy_seed": seed, "pytorch_seed": seed})
    )
    configs.append(Params.from_file("config/ud/en/udify_bert_finetune_en_ewt.json"))
    configs.append(Params.from_file("config/udify_base.json"))
    return util.merge_configs(configs)
5,344,831
def rgb2hex(rgb):
    """Converts an RGB 3-tuple to a hexadecimal color string.

    EXAMPLE
    -------
        >>> rgb2hex((0,0,255))
        '#0000FF'
    """
    return ('#%02x%02x%02x' % tuple(rgb)).upper()
5,344,832
def path_regex(
    path_regex: Union[str, re.Pattern],
    *,
    disable_stage_removal: Optional[bool] = False
):
    """Validate the path in the event against the given path pattern.

    The following APIErrorResponse subclasses are used:
        PathNotFoundError: When the path doesn't match.

    Args:
        path_regex: A regular expression to validate against.
        disable_stage_removal (bool): Preserve the original path with stage.
    """
    return _get_decorator(
        validate_path_regex,
        path_regex=path_regex,
        disable_stage_removal=disable_stage_removal,
        update_event=True,
    )
5,344,833
async def root():
    """Health check"""
    return {"status": "OK"}
5,344,834
def build_foreign_keys( resources: Dict[str, dict], prune: bool = True, ) -> Dict[str, List[dict]]: """Build foreign keys for each resource. A resource's `foreign_key_rules` (if present) determines which other resources will be assigned a foreign key (`foreign_keys`) to the reference's primary key: * `fields` (List[List[str]]): Sets of field names for which to create a foreign key. These are assumed to match the order of the reference's primary key fields. * `exclude` (Optional[List[str]]): Names of resources to exclude. Args: resources: Resource descriptors by name. prune: Whether to prune redundant foreign keys. Returns: Foreign keys for each resource (if any), by resource name. * `fields` (List[str]): Field names. * `reference['resource']` (str): Reference resource name. * `reference['fields']` (List[str]): Reference resource field names. Examples: >>> resources = { ... 'x': { ... 'schema': { ... 'fields': ['z'], ... 'primary_key': ['z'], ... 'foreign_key_rules': {'fields': [['z']]} ... } ... }, ... 'y': { ... 'schema': { ... 'fields': ['z', 'yy'], ... 'primary_key': ['z', 'yy'], ... 'foreign_key_rules': {'fields': [['z', 'zz']]} ... } ... }, ... 'z': {'schema': {'fields': ['z', 'zz']}} ... } >>> keys = build_foreign_keys(resources) >>> keys['z'] [{'fields': ['z', 'zz'], 'reference': {'resource': 'y', 'fields': ['z', 'yy']}}] >>> keys['y'] [{'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}}] >>> keys = build_foreign_keys(resources, prune=False) >>> keys['z'][0] {'fields': ['z'], 'reference': {'resource': 'x', 'fields': ['z']}} """ tree = _build_foreign_key_tree(resources) keys = {} for name in tree: firsts = [] followed = [] for fields in tree[name]: path = _traverse_foreign_key_tree(tree, name, fields) firsts.append(path[0]) followed.extend(path[1:]) keys[name] = firsts if prune: # Keep key if not on path of other key keys[name] = [key for key in keys[name] if key not in followed] return keys
5,344,835
def port_scan(ip):
    """Run a scan to determine what services are responding.

    Returns nmap output in JSON format.
    """
    # validate input
    valid_ip = ipaddress.ip_address(ip)
    # nmap requires a `-6` option if the target is IPv6
    v6_flag = '-6 ' if valid_ip.version == 6 else ''
    nmap_command = f'sudo nmap {v6_flag}{valid_ip} --stats-every 60 -oX - ' \
                   '-R -Pn -T4 --host-timeout 120m --max-scan-delay 5ms ' \
                   '--max-retries 2 --min-parallelism 32 ' \
                   '--defeat-rst-ratelimit -sV -O -sS -p1-65535'
    completed_process = run_it(nmap_command)
    xml_string = completed_process.stdout.decode()
    data = bf.data(fromstring(xml_string))
    return data
5,344,836
def test_quoted_digits():
    """
    Any value that is composed entirely of digits should be quoted for safety.
    CloudFormation is happy for numbers to appear as strings.
    But the opposite (e.g. account numbers as numbers) can cause issues.
    See https://github.com/awslabs/aws-cfn-template-flip/issues/41
    """
    value = dump_json(ODict((
        ("int", 123456),
        ("float", 123.456),
        ("oct", "0123456"),
        ("bad-oct", "012345678"),
        ("safe-oct", "0o123456"),
        ("string", "abcdef"),
    )))

    expected = "\n".join((
        "int: 123456",
        "float: 123.456",
        "oct: '0123456'",
        "bad-oct: '012345678'",
        "safe-oct: '0o123456'",
        "string: abcdef",
        ""
    ))

    actual = cfn_flip.to_yaml(value)

    assert actual == expected
5,344,837
def create_table(p, table_name, schema):
    """Create a new Prism table.

    Parameters
    ----------
    p : Prism
        Instantiated Prism class from prism.Prism()

    table_name : str
        The name of the table to obtain details about. If the default value
        of None is specified, details regarding first 100 tables is returned.

    schema : list
        A list of dictionaries containing the schema

    Returns
    -------
    If the request is successful, a dictionary containing information about
    the table is returned.
    """
    p.create_bearer_token()
    table = p.create_table(table_name, schema=schema)
    return table
5,344,838
def get_args():
    """
    Parses and processes args, returning the modified arguments as a dict.
    This is to maintain backwards compatibility with the old way of parsing
    arguments.
    """
    parser = make_parser()
    args = parser.parse_args()
    process_args(args)
    return vars(args)
5,344,839
def run_asm_pprinter(ir: gtirb.IR, args: Iterable[str] = ()) -> str:
    """
    Runs the pretty-printer to generate an assembly output.

    :param ir: The IR object to print.
    :param args: Any additional arguments for the pretty printer.
    :returns: The assembly string.
    """
    asm, _ = run_asm_pprinter_with_outputput(ir, args)
    return asm
5,344,840
def fix_ccdsec(hdu): """ Fix CCDSEC keywords in image extensions """ section_regexp = re.compile(SECTION_STRING) # In unbinned space ccdsec = _get_key_value(hdu, 'CCDSEC') detsec = _get_key_value(hdu, 'DETSEC') if None in [ccdsec, detsec]: raise ValueError("CCDSEC {}; detsec {}".format(ccdsec, detsec)) updated = False ccd_coords = list(section_regexp.match(ccdsec).groups()) detector_coords = list(section_regexp.match(detsec).groups()) # Y coordinates should match! if ccd_coords[2:4] != detector_coords[2:4]: raise ValueError("Y values: {} {}".format(ccdsec, detsec)) # X coordinates maybe wrong if ccd_coords[0:2] != detector_coords[0:2]: for i, x in enumerate(detector_coords[0:2]): offset_x = int(x) - CCDWIDTH if offset_x <= 0: if ccd_coords[i] != detector_coords[i]: # Use DETSEC ccd_coords[i] = detector_coords[i] updated = True else: # Reset offset to x offset_x = x elif offset_x > CCDWIDTH: updated = True offset_x -= CCDWIDTH # update ccd_coords ccd_coords[i] = offset_x # Reset CCDSEC ccdsec = "[{}:{},{}:{}]".format(ccd_coords[0], ccd_coords[1], ccd_coords[2], ccd_coords[3]) hdu.header['CCDSEC'] = ccdsec return updated
5,344,841
def reload(hdf):
    """Reload a hdf file, hdf = reload(hdf)"""
    filename = hdf.filename
    return load(filename)
5,344,842
def test__add_obscon_as_predictors() -> None: """ Test `hsr4hci.training.add_obscon_as_predictors`. """ np.random.seed(42) # Create an expected signal expected_signal = np.zeros(100) expected_signal[50] = 1 for i in range(5): expected_signal = np.convolve( expected_signal, np.ones(3) / 3, mode='same' ) expected_signal = expected_signal.reshape(100, 1) # Case 1: empty obscon array predictors = np.random.normal(0, 1, (100, 10)) obscon_array = np.empty((100, 0)) final_predictors = add_obscon_as_predictors( predictors=predictors, obscon_array=obscon_array, expected_signal=expected_signal, max_correlation=0.5, ) assert np.allclose(predictors, final_predictors) # Case 2: all values are the same for expected signal predictors = np.random.normal(0, 1, (100, 10)) obscon_array = np.empty((100, 0)) final_predictors = add_obscon_as_predictors( predictors=predictors, obscon_array=obscon_array, expected_signal=np.ones(100), max_correlation=0.5, ) assert np.allclose(predictors, final_predictors) # Case 3: any NaN-values in the expected signal predictors = np.random.normal(0, 1, (100, 10)) obscon_array = np.empty((100, 0)) final_predictors = add_obscon_as_predictors( predictors=predictors, obscon_array=obscon_array, expected_signal=np.full((100, ), np.nan), max_correlation=0.5, ) assert np.allclose(predictors, final_predictors) # Case 4: perfect correlation predictors = np.random.normal(0, 1, (100, 10)) final_predictors = add_obscon_as_predictors( predictors=predictors, obscon_array=expected_signal, expected_signal=expected_signal, max_correlation=0.5, ) assert np.allclose(predictors, final_predictors) # Case 5: one OC is too strongly correlated predictors = np.random.normal(0, 1, (100, 10)) obscon_array = np.hstack( [ np.random.normal(0, 1, (100, 1)), np.random.normal(0, 1, (100, 1)), np.random.normal(0, 0.05, (100, 1)) + expected_signal, np.random.normal(0, 0.10, (100, 1)) + expected_signal, ] ) final_predictors = add_obscon_as_predictors( predictors=predictors, obscon_array=obscon_array, expected_signal=expected_signal, max_correlation=0.5, ) assert final_predictors.shape[1] == predictors.shape[1] + 3
5,344,843
def test_is_testrun_available(api_client, tr_plugin):
    """ Test of method `is_testrun_available` """
    tr_plugin.testrun_id = 100

    api_client.send_get.return_value = {'is_completed': False}
    assert tr_plugin.is_testrun_available() is True

    api_client.send_get.return_value = {'error': 'An error occured'}
    assert tr_plugin.is_testrun_available() is False

    api_client.send_get.return_value = {'is_completed': True}
    assert tr_plugin.is_testrun_available() is False
5,344,844
def read_image(im_name, n_channel, data_dir='', batch_size=1, rescale=None): """ function for create a Dataflow for reading images from a folder This function returns a Dataflow object for images with file name containing 'im_name' in directory 'data_dir'. Args: im_name (str): part of image names (i.e. 'jpg' or 'im_'). n_channel (int): number of channels (3 for color images and 1 for grayscale images) data_dir (str): directory of images batch_size (int): number of images read from Dataflow for each batch rescale (bool): whether rescale image to 224 or not Returns: Image (object): batch images can be access by Image.next_batch_dict()['image'] """ def rescale_im(im, short_side=416): """ Pre-process for images images are rescaled so that the shorter side = 224 """ im = np.array(im) h, w = im.shape[0], im.shape[1] if h >= w: new_w = short_side im = imagetool.rescale_image(im, (int(h * new_w / w), short_side)) # im = skimage.transform.resize( # im, (int(h * new_w / w), short_side), preserve_range=True) else: new_h = short_side im = imagetool.rescale_image(im, (short_side, int(w * new_h / h))) # im = skimage.transform.resize( # im, (short_side, int(w * new_h / h)), preserve_range=True) # return im.astype('uint8') return im def normalize_im(im, *args): im = imagetool.rescale_image(im, rescale) # im = skimage.transform.resize( # im, rescale, preserve_range=True) # im = rescale_im(im, short_side=rescale) im = np.array(im) if np.amax(im) > 1: im = im / 255. return np.clip(im, 0., 1.) # if rescale: # pf_fnc = rescale_im # else: # pf_fnc = normalize_im if isinstance(rescale, int): rescale = [rescale, rescale] else: assert len(rescale) == 2 image_data = Image( im_name=im_name, data_dir=data_dir, n_channel=n_channel, shuffle=False, batch_dict_name=['image', 'shape'], pf_list=(normalize_im,())) image_data.setup(epoch_val=0, batch_size=batch_size) return image_data
5,344,845
def build_optimizer(config, model):
    """
    Build optimizer, set weight decay of normalization to 0 by default.
    """
    skip = {}
    skip_keywords = {}
    if hasattr(model, 'no_weight_decay'):
        skip = model.no_weight_decay()
    if hasattr(model, 'no_weight_decay_keywords'):
        skip_keywords = model.no_weight_decay_keywords()
    parameters = set_weight_decay(model, skip, skip_keywords)

    opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
    optimizer = None
    if opt_lower == 'sgd':
        optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
                              lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
                                lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)

    return optimizer
5,344,846
def do_monitor_tcp_check_list(client, args):
    """ List monitor items """
    kwargs = {}
    if args.tenant:
        kwargs['tenant'] = args.tenant
    if args.system:
        kwargs['is_system'] = True
    monitors = client.tcpcheck.list(**kwargs)
    utils.print_list(monitors, client.tcpcheck.columns)
5,344,847
def publish() -> None:
    """Publish project."""
    check_version()
    raise NotImplementedError("Publishing hasn't been implemented yet.")
5,344,848
def powderrays(dlist, ki=None, phi=None):
    """Calculate powder ray positions."""
    instr = session.instrument
    if ki is None:
        mono = instr._attached_mono
        ki = to_k(mono.read(), mono.unit)
    for line in check_powderrays(ki, dlist, phi):
        session.log.info(line)
5,344,849
def ising2d_worm(T_range, mcsteps, L): """T = temperature [K]; L = Length of grid.""" def new_head_position(worm, lattice): """ Extract current worm head position indices, then randomly set new worm head position index. lattice.occupied points to either lattice.bonds_x or lattice.bonds_y. """ [i, j] = worm.head bond_type = np.random.randint(1, worm.q) direction = ["Up", "Down", "Left", "Right"][np.random.randint(0, 4)] if direction=="Right": # use current indices to check for bond bond = [i, j] site = [0 if i==L-1 else i+1, j] lattice.bond_idx = 0 elif direction=="Left": # use new indices to check for bond site = [L-1 if i==0 else i-1, j] bond = [site[0], site[1]] lattice.bond_idx = 0 elif direction=="Up": # use current indices to check for bond bond = [i, j] site = [i, 0 if j==L-1 else j+1] lattice.bond_idx = 1 elif direction=="Down": # use new indices to check for bond site = [i, L-1 if j==0 else j-1] bond = [site[0], site[1]] lattice.bond_idx = 1 return bond, bond_type, site, lattice def accept_movement(current_bond, bond_type, temperature): """ Bond creation/deletion using Boltzman factor. Bonds are always deleted since 1/exp(-2/T) > 1 for all T>0. """ if current_bond: if current_bond==bond_type: # new_bond = 0 will delete the current bond accept_probability, new_bond = 1, 0 else: accept_probability, new_bond = 1-np.exp(-2/temperature), 0 else: accept_probability, new_bond = np.exp(-2/temperature), bond_type accept_move = True if np.random.rand()<accept_probability else False """ if current_bond==bond_type: accept_probability, new_bond = 1, 0 else: accept_probability, new_bond = np.exp(-2/temperature), bond_type accept_move = True if np.random.rand()<accept_probability else False """ return accept_move, new_bond def monte_carlo_step(lattice, worm, temperature): """ Since the lattice matrix is indexed as [column, row], we need to input the i, j indices in reversed order, as lattice.bond.occupied[j, i]. Measured quantities per step: Nb_step = number of bonds per step. G_micro = 2pt correlation function per micro_step corresponding to the partition function of the worm algorithm for the 2D Ising model. G_step = 2pt correlation function per step corresponding to the partition function of the metropolis algorithm for the 2D Ising model. * Note that G_micro(|i-j|) == G_step(|i-j|) when |i-j|=0. """ Nb_step = np.zeros((2)) G_micro, G_step = np.zeros((L+1)), np.zeros((L+1)) G_step_bool = np.zeros((L+1), dtype=bool) for micro_step in range(2*L**2): # propose head movement; [i, j] = new bond indices. [i, j], bond_type, new_site, lattice = new_head_position(worm, lattice) accept_move, bond_type = accept_movement(lattice.bonds[lattice.bond_idx, j, i], bond_type, temperature) if accept_move: # move worm head and either change or delete the bond. lattice.bonds[lattice.bond_idx, j, i] = bond_type worm.head = new_site # Update correlation function every microstep. diameter = worm.Diameter() G_micro[diameter] += 1 G_step_bool[diameter] = True if np.all(worm.head==worm.tail): # measure observables and reset worm when path is closed. G_step[G_step_bool] += 1 G_step_bool[:] = False B=(lattice.bonds>0).sum() Nb_step += B, B**2 worm.ResetPosition() return lattice, worm, G_micro, G_step, Nb_step # initialize main structures. print('Initializing Worm Algorithm.') observables = Observables(q, L, T_range, mcsteps) lattice = Lattice(L) worm = Worm(q, L) # correlation, correlation2, and bond_number each act as a pointer. 
correlation = observables.correlation #relates to G_micro correlation2 = observables.correlation2 #relates to G_step bond_number = observables.mean_bonds #relates to Nb_step print('Starting thermalization cycle ...') for step in range(int(mcsteps/5)): lattice, worm, G_micro, G_step, Nb_step = monte_carlo_step(lattice, worm, T_range[0]) print('Starting measurement cycle ...') for T_idx, T in enumerate(T_range): print(" ", "Running temperature =", T, "...") for step in range(mcsteps): lattice, worm, G_micro, G_step, Nb_step = monte_carlo_step(lattice, worm, T) # sum observables correlation[:, T_idx] += G_micro correlation2[:, T_idx] += G_step bond_number[:, T_idx] += Nb_step # average and store observables. observables.AverageObservables() print('Simulation Complete!') return observables, lattice, worm
5,344,850
def rcrgb_plot(a, out=None):
    """ Plot logg classification from bitmask
    """
    b = bitmask.ParamBitMask()
    rgb = np.where((a['PARAMFLAG'][:, 1] & b.getval('LOGG_CAL_RGB')) > 0)[0]
    rc = np.where((a['PARAMFLAG'][:, 1] & b.getval('LOGG_CAL_RC')) > 0)[0]
    ms = np.where((a['PARAMFLAG'][:, 1] & b.getval('LOGG_CAL_MS')) > 0)[0]
    rgb_ms = np.where((a['PARAMFLAG'][:, 1] & b.getval('LOGG_CAL_RGB_MS')) > 0)[0]

    fig, ax = plots.multi(1, 1)
    plots.plotp(ax, a['FPARAM'][rgb, 0], a['FPARAM'][rgb, 1], color='r', size=1,
                xr=[8000, 3000], yr=[6, -1], xt='$T_{eff}$', yt='log g')
    plots.plotp(ax, a['FPARAM'][rc, 0], a['FPARAM'][rc, 1], color='b', size=1)
    plots.plotp(ax, a['FPARAM'][ms, 0], a['FPARAM'][ms, 1], color='g', size=1)
    plots.plotp(ax, a['FPARAM'][rgb_ms, 0], a['FPARAM'][rgb_ms, 1], color='m', size=1)
    if out is not None:
        fig.savefig(out + '.png')
        plt.close()
5,344,851
def valid_chapter_name(chapter_name):
    """
    Check whether a chapter name is reasonable

    Args:
        chapter_name ([type]): [description]
    """
    for each in ["目录"]:
        if each in chapter_name:
            return False
    return True
5,344,852
def lex_min(perms: Iterable[Perm]) -> Tuple[Perm, ...]:
    """Find the lexicographical minimum of the sets of all symmetries."""
    return min(all_symmetry_sets(perms))
5,344,853
def migrate_service(cluster, service, command, success_string, timeout, region):
    """Run a single task based on service's task definition in AWS ECS
    and wait for it to stop with success."""
    migrate_service_func(cluster, service, command, success_string, timeout, region)
5,344,854
def subfield(string, delim, occurrence): """ function to extract specified occurence of subfield from string using specified field delimiter eg select subfield('abc/123/xyz','/',0) returns 'abc' eg select subfield('abc/123/xyz','/',1) returns '123' eg select subfield('abc/123/xyz','/',2) returns 'xyz' eg select subfield('abc/123/xyz','/',3) returns '' """ """ # this logic matches the functions written for msql and psql, # because they do not have a string method to do this ans = '' found = 0 for ch in string: if ch == delim: found += 1 if found == occurrence + 1: break elif found == occurrence: ans += ch if not found: ans = '' # else it returns the entire string return ans """ # python does have a suitable string method, so use it if delim in string: try: return string.split(delim)[occurrence] except IndexError: # equivalent to the last example above return '' else: return ''
5,344,855
def in_hull(points, hull):
    """
    Test if points in `p` are in `hull`

    `p` should be a `NxK` coordinates of `N` points in `K` dimensions
    `hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
    coordinates of `M` points in `K` dimensions for which Delaunay triangulation
    will be computed
    """
    # if not isinstance(hull,Delaunay):
    del points['flight_name']
    del points['output']
    del points['TEMPS']
    del hull['flight_name']
    del hull['output']
    del hull['TEMPS']
    hull = Delaunay(hull.as_matrix())
    return hull.find_simplex(points.as_matrix()) >= 0
5,344,856
def sample_random(X_all, N):
    """Given an array of (x,t) points, sample N points from this."""
    set_seed(0)  # this can be fixed for all N_f

    idx = np.random.choice(X_all.shape[0], N, replace=False)
    X_sampled = X_all[idx, :]

    return X_sampled
5,344,857
def write_text_list_to_file(text_list, writer):
    """
    Merge a list of texts into a single text file

    Args:
        text_list: A list of texts (List[String])
        writer: A file writer
    """
    try:
        for text in text_list:
            writer.writelines(text)
    except Exception as e:
        logger.error("Error: %s", str(e))
5,344,858
def hand_rankings(class_counts):
    """Print a table of the probability of various hands

    class_counts: histogram of hand classifications
    """
    total = sum(class_counts.values())
    print("Prob  ", "Classification")
    print("------", "--------------")
    for classification in PokerHand.classifications + [None]:
        # In looking up the format operator again, I saw that Python now prefers using
        # str.format to %-formatting. The stuff inside the curly braces is a format string
        # for the first (and only, here) argument to format. : is sort of the replacement for %.
        # >6 tells it to right-justify within 6 spaces, and .2% tells it to display
        # as a percentage, with two digits after the decimal.
        print("{:>6.2%}".format(class_counts.get(classification, 0) / total),
              classification)
5,344,859
def read(db, query: Optional[dict] = None, pql: any = None, order_by: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, disable_count_total: bool = False, **kwargs): """Read data from DB. Args: db (MontyCollection): DB connection query (dict or Query): Query to select items pql (PQL) Python-Query-Language to select items order_by (list): column name to sort by with format [ ( column1, 1 or -1 ), ... ] limit (int): number of items to return per a page offset (int): offset of cursor disable_count_total (bool): set True to avoid counting total number of records **kwargs: kwargs for function `pandas.read_sql_query` or `influxdb.DataFrameClient.query` Returns: (list, int): list of data and total number of records """ if limit is None: limit = 0 if offset is None: offset = 0 if pql is not None and query is not None: raise ValueError('Either query or pql can be specified') if pql: query = PQL.find(pql) if query: query = _fix_query_exists(query) if order_by is None: data = db.find(query).skip(offset).limit(limit) count_total = db.count(query) if not disable_count_total else None else: data = db.find(query).sort(order_by).skip(offset).limit(limit) count_total = db.count(query) if not disable_count_total else None else: if order_by is None: data = db.find().skip(offset).limit(limit) count_total = db.count({}) if not disable_count_total else None else: data = db.find().sort(order_by).skip(offset).limit(limit) count_total = db.count({}) if not disable_count_total else None data = list(data) count_total = count_total if count_total is not None else len(data) return data, count_total
5,344,860
def check_horizontal(board: list) -> bool:
    """
    Check whether every line of the board contains only unique elements.
    Return True if it does, False otherwise.

    >>> check_horizontal(["**** ****",\
    "***1 ****",\
    "** 3****",\
    "* 4 1****",\
    " 9 5 ",\
    " 6 83 *",\
    "3 1 **",\
    " 8 2***",\
    " 12 ****"])
    True
    >>> check_horizontal(["**** ****",\
    "***1 ****",\
    "** 3****",\
    "* 4 1****",\
    " 9 5 ",\
    " 6 83 *",\
    "3 1 **",\
    " 8 2***",\
    "112 ****"])
    False
    """
    unique = True
    for line in board:
        if not check_unique(list(line)):
            unique = False
            break
    return unique
5,344,861
def _update(data, update):
    """Update *data* identified by *name* with *value*.

    Args:
        data (munch.Munch): data to update
        update (str): update to apply

    Raises:
        RuntimeError: on error
    """
    try:
        name, value = update.split('=')
    except ValueError:
        raise RuntimeError('update {} is malformed'.format(update))

    if '.' not in name:
        data[name] = value
        return

    prop_parts = name.split('.')
    if len(prop_parts) > 1:
        parent_prop_name = '.'.join(prop_parts[:-1])
        name = prop_parts[-1]
        config = eval('data.{}'.format(parent_prop_name))
    else:
        config = data
    config[name] = value
5,344,862
def IsDir(msg=None):
    """Verify the directory exists."""
    def f(v):
        if os.path.isdir(v):
            return v
        else:
            raise Invalid(msg or 'not a directory')
    return f
5,344,863
def update_events(dt: float, pos_x: float, pos_y: float, dir_x: float, dir_y: float, plane_x: float, plane_y: float): """ Updates player position in response to user input. """ for e in pygame.event.get(): if e.type == pygame.KEYDOWN: if e.key == pygame.K_ESCAPE: pygame.quit() raise SystemExit elif e.type == pygame.QUIT: pygame.quit() raise SystemExit move_speed: float = dt * 5.0 rot_speed: float = dt * 3.0 pressed = pygame.key.get_pressed() new_xpos_plus: int = int(pos_x + dir_x * move_speed) new_ypos_plus: int = int(pos_y + dir_y * move_speed) if pressed[pygame.K_UP]: if not WORLD_MAP[new_xpos_plus][int(pos_y)]: pos_x += dir_x * move_speed if not WORLD_MAP[int(pos_x)][new_ypos_plus]: pos_y += dir_y * move_speed new_xpos_minus: int = int(pos_x - dir_x * move_speed) new_ypos_minus: int = int(pos_y - dir_y * move_speed) if pressed[pygame.K_DOWN]: if not WORLD_MAP[new_xpos_minus][int(pos_y)]: pos_x -= dir_x * move_speed if not WORLD_MAP[int(pos_x)][new_ypos_minus]: pos_y -= dir_y * move_speed if pressed[pygame.K_RIGHT]: old_dir_x: float = dir_x dir_x = dir_x * math.cos(-rot_speed) - dir_y * math.sin(-rot_speed) dir_y = old_dir_x * math.sin(-rot_speed) + dir_y * math.cos(-rot_speed) old_plane_x: float = plane_x plane_x = plane_x * math.cos(-rot_speed) - plane_y * math.sin(-rot_speed) plane_y = old_plane_x * math.sin(-rot_speed) + plane_y * math.cos(-rot_speed) if pressed[pygame.K_LEFT]: old_dir_x: float = dir_x dir_x = dir_x * math.cos(rot_speed) - dir_y * math.sin(rot_speed) dir_y = old_dir_x * math.sin(rot_speed) + dir_y * math.cos(rot_speed) old_plane_x: float = plane_x plane_x = plane_x * math.cos(rot_speed) - plane_y * math.sin(rot_speed) plane_y = old_plane_x * math.sin(rot_speed) + plane_y * math.cos(rot_speed) return pos_x, pos_y, dir_x, dir_y, plane_x, plane_y
5,344,864
def test_config_controller_failed(hass, mock_ctrl, mock_scanner):
    """Test for controller failure."""
    config = {
        'device_tracker': {
            CONF_PLATFORM: unifi.DOMAIN,
            CONF_USERNAME: 'foo',
            CONF_PASSWORD: 'password',
        }
    }
    mock_ctrl.side_effect = APIError(
        '/', 500, 'foo', {}, None)
    result = unifi.get_scanner(hass, config)
    assert result is False
5,344,865
def deg_to_xyz(lat_deg, lon_deg, altitude):
    """
    http://www.oc.nps.edu/oc2902w/coord/geodesy.js

    lat,lon,altitude to xyz vector

    input:
        lat_deg      geodetic latitude in deg
        lon_deg      longitude in deg
        altitude     altitude in km

    output:
        returns vector x 3 long ECEF in km
    """
    clat = cos(radians(lat_deg))
    slat = sin(radians(lat_deg))
    clon = cos(radians(lon_deg))
    slon = sin(radians(lon_deg))

    _, rn, _ = radcur(lat_deg)
    ecc = EARTH_Ecc
    esq = ecc * ecc

    x = (rn + altitude) * clat * clon
    y = (rn + altitude) * clat * slon
    z = ((1 - esq) * rn + altitude) * slat

    return [x, y, z]
5,344,866
def remove_unused_colours(ip, line_colours):
    """
    >>> remove_unused_colours(np.array([[0,0,3], [1,5,1], [2,0,6], [2,2,2],[4,4,0]]), {2, 4})
    array([[0, 0, 0],
           [0, 0, 0],
           [2, 0, 0],
           [2, 2, 2],
           [4, 4, 0]])
    """
    # get a list of all unique colours
    all_colours = list(np.unique(ip))
    # remove background colour 0
    all_colours.remove(0)
    # remove the line colours
    for line_colour in line_colours:
        all_colours.remove(line_colour)
    # for all other colours (i.e. those that are neither the background colour 0
    # nor line colours), turn them to the background colour = 0
    for each_colour in all_colours:
        ip[np.where(ip == each_colour)] = 0
    return ip
5,344,867
def extract_info(spec):
    """Extract information from the instance SPEC."""
    info = {}
    info['name'] = spec.get('InstanceTypeId')
    info['cpu'] = spec.get('CpuCoreCount')
    info['memory'] = spec.get('MemorySize')
    info['nic_count'] = spec.get('EniQuantity')
    info['disk_quantity'] = spec.get('DiskQuantity')
    if spec.get('LocalStorageAmount'):
        info['disk_count'] = spec.get('LocalStorageAmount')
        info['disk_size'] = spec.get('LocalStorageCapacity')
        info['disk_type'] = spec.get('LocalStorageCategory')

    # Some special families use NVMe driver for local disks
    _families = ['ecs.i3', 'ecs.i3g']
    if spec.get('InstanceTypeFamily') in _families:
        info['local_disk_driver'] = 'nvme'
    else:
        info['local_disk_driver'] = 'virtio_blk'

    # Some special families use NVMe driver for cloud disks
    _families = ['ecs.g7se']
    if spec.get('InstanceTypeFamily') in _families:
        info['cloud_disk_driver'] = 'nvme'
    else:
        info['cloud_disk_driver'] = 'virtio_blk'

    # Some security-enhanced instance families have 50% encrypted memory
    _families = ['ecs.c7t', 'ecs.g7t', 'ecs.r7t']
    if spec.get('InstanceTypeFamily') in _families:
        info['memory'] = int(info['memory'] * 0.5)

    return info
5,344,868
def sum_values(**d):  # docstring example: write it the way you would write a git commit message
    """Return the sum of the dict's values

    It does not care at all what the keys are.
    """
    return sum_func(*d.values())
5,344,869
def parse_title(title):
    """Parse strings from lineageos json

    :param title: format should be `code - brand phone`
    """
    split_datum = title.split(' - ')
    split_name = split_datum[1].split(' ')

    device = split_datum[0]
    brand = split_name[0]
    name = ' '.join(split_name[1:])

    return [brand, name, device, device]
5,344,870
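A quick illustration of the expected `code - brand phone` format (the title below is made up):

assert parse_title("sunfish - Google Pixel 4a") == ["Google", "Pixel 4a", "sunfish", "sunfish"]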
def addCountersTransactions(b): """Step 2 : The above list with count as the last element should be [ [1, 1, 0, 1], [0, 0, 0, 4], [1, 1, 1, 3] ] converted to the following way [ [1, 1, 0, 1, 0], [1, 1, 1, 3, 4] ] with cnt 1 and cnt 2 for anti-mirroring technique Algorithm ========= Check for the first element in the listitem. If it is 1, cnt2 = 0 If it is 0, Not the values of the list except the last item (count) Check the Not valued list is matching with existing 1valued list If it is matching, then add the last count to cnt2 of that matched list else add a new entry with last count as cnt2 and cnt1 as 0 """ # n = list(b) # x = b[:] # cnt1 = [] # cnt2 = [] temp_list2 = [] t1list = [] zlist = [] for i in range(len(b)): #print b[i], b[i][0] if b[i][0] == 1: b[i] = b[i] + [0] #adding this list item to another list zlist = remove_counts(b[i],t1list) #print 'zlist = ',zlist temp_list2.append(b[i]) #print 'temp_list appended ', temp_list #print b if b[i][0] == 0: #print 'Found an item that starts with 0' for item in range(len(b[i])): #print b[i][item],i,item, len(b[i]) if b[i][item] == 0: #print 'Found a 0 item, change it to 1' b[i][item] = 1 else: #print 'Found a 1 item, change it to 0' if item != len(b[i])-1: #print 'Not the last element, so it is changed here (NOT)' b[i][item] = 0 else: b[i] = b[i] + [b[i][item]] b[i][item] = 0 #print 'Changed cos' #print 'Present list item inside loop is ', b[i] #print 'Present list item is ', b[i] temp = b[i] #print temp tlist = [] telist = remove_counts(temp,tlist) temp_list2.append(b[i]) #########print 'temp_list appended \n', temp_list2 #print 'telist = ',telist #print 'y is ', y # if telist in temp_list2: # print 'HEY FOUND HIM' # #b[i] = b[i] + [b[i][item]] # else: # print'Else not found' return temp_list2 '''Step 3: Do {I1} {I2} and {In} Then check for support and prune the list Do the above step for all the subsets and prune with support To compute {I1}, {I2}, ... {In} 1. For loop i to len(items) 2. Check for ith item in lists, If it is 1, Sum up Cnt1 and put it in Ii If it is 0, Sum up Cnt2 and put it in Ii 2. Print all Ii's '''
5,344,871
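The merge described in the docstring (fold a 0-leading row's count into the cnt2 of the matching 1-leading row) is only hinted at in the commented-out code above. Below is a minimal two-pass sketch of that anti-mirroring idea, written against the docstring's example rather than the original implementation; the function name is hypothetical:

def add_counters(transactions):
    """Each row is a 0/1 pattern followed by a count; return rows of [pattern..., cnt1, cnt2]."""
    merged = []   # rows whose pattern starts with 1
    index = {}    # pattern tuple -> row in `merged`
    # Pass 1: keep 1-leading rows, with cnt1 = count and cnt2 = 0.
    for row in transactions:
        pattern, count = tuple(row[:-1]), row[-1]
        if pattern[0] == 1:
            new_row = list(pattern) + [count, 0]
            merged.append(new_row)
            index[pattern] = new_row
    # Pass 2: fold each 0-leading row into the complement of its pattern.
    for row in transactions:
        pattern, count = tuple(row[:-1]), row[-1]
        if pattern[0] == 0:
            mirror = tuple(1 - v for v in pattern)
            if mirror in index:
                index[mirror][-1] += count            # add to cnt2 of the matching row
            else:
                new_row = list(mirror) + [0, count]   # cnt1 = 0, cnt2 = count
                merged.append(new_row)
                index[mirror] = new_row
    return merged

assert add_counters([[1, 1, 0, 1], [0, 0, 0, 4], [1, 1, 1, 3]]) == [[1, 1, 0, 1, 0], [1, 1, 1, 3, 4]]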
def aggregate_layers(
    hidden_states: dict, mode: typing.Union[str, typing.Callable]
) -> dict:
    """Aggregate a hidden states dictionary (key = layer, value = 2D tensor of n_tokens x emb_dim) per layer.

    Args:
        hidden_states (dict): key = layer (int), value = 2D PyTorch tensor of shape (n_tokens, emb_dim)

    Raises:
        NotImplementedError

    Returns:
        dict: key = layer, value = array of emb_dim
    """
    states_layers = dict()

    emb_aggregation = mode
    # iterate over layers
    for i in hidden_states.keys():
        if emb_aggregation == "last":
            state = hidden_states[i][-1, :]  # get last token
        elif emb_aggregation == "first":
            state = hidden_states[i][0, :]  # get first token
        elif emb_aggregation == "mean":
            state = torch.mean(hidden_states[i], dim=0)  # mean over tokens
        elif emb_aggregation == "median":
            # median over tokens (torch.median with dim returns (values, indices))
            state = torch.median(hidden_states[i], dim=0).values
        elif emb_aggregation == "sum":
            state = torch.sum(hidden_states[i], dim=0)  # sum over tokens
        elif emb_aggregation == "all" or emb_aggregation is None:
            state = hidden_states[i]  # keep the full (n_tokens, emb_dim) matrix
        elif callable(emb_aggregation):
            state = emb_aggregation(hidden_states[i])
        else:
            raise NotImplementedError(
                f"Sentence embedding method [{emb_aggregation}] not implemented"
            )

        states_layers[i] = state.detach().cpu().numpy()

    return states_layers
5,344,872
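A small usage sketch, assuming aggregate_layers and torch are importable; it only shows the expected input shape (layer index -> n_tokens x emb_dim tensor) and the per-layer output shape:

import torch

hidden_states = {0: torch.randn(5, 16), 1: torch.randn(5, 16)}  # 5 tokens, emb_dim 16

pooled = aggregate_layers(hidden_states, mode="mean")
assert set(pooled.keys()) == {0, 1}
assert pooled[0].shape == (16,)   # one emb_dim vector per layer

# A callable also works, e.g. max-pooling over tokens:
maxpooled = aggregate_layers(hidden_states, mode=lambda t: t.max(dim=0).values)
assert maxpooled[1].shape == (16,)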
def get_bg_stat_info(int_faces, adj_list, face_inds, face_inds_new):
    """ Output the list of faces and the list of verts for each stat. """
    stat_faces = []
    stat_verts = []
    for k in range(len(int_faces)):
        # Check if face already exists.
        if int_faces[k] != 0:
            continue
        else:
            # See if there are any adjacent faces.
            for j in range(len(adj_list[k])):
                if int_faces[adj_list[k][j]] != 0 and int_faces[adj_list[k][j-1]] != 0:
                    #stat_faces.append([k, adj_list[k][j], adj_list[k][j-1]])
                    # Find relevant vertices
                    stat_verts_new = find_vertex_ind(k, adj_list[k][j], adj_list[k][j-1], face_inds, face_inds_new)
                    #remaining_verts = set(face_inds_new[k])
                    #remaining_verts.remove(vert_0)
                    #remaining_verts = list(remaining_verts)
                    #stat_verts_new = [vert_0]
                    #print stat_verts_new, vert_0, remaining_verts, k, j
                    if stat_verts_new is not None:
                        stat_faces.append([k, adj_list[k][j], adj_list[k][j-1]])
                        stat_verts.append(stat_verts_new)
                        #assert len(stat_verts_new) == 3, "ERROR: stat_verts incorrectly computed"
    return stat_faces, stat_verts
5,344,873
def _build_server_data(): """ Returns a dictionary containing information about the server environment. """ # server environment server_data = { 'host': socket.gethostname(), 'argv': sys.argv } for key in ['branch', 'root']: if SETTINGS.get(key): server_data[key] = SETTINGS[key] return server_data
5,344,874
def config_data() -> dict:
    """Dummy config data."""
    return {
        "rabbit_connection": {
            "user": "guest",
            "passwd": "guest",
            "host": "localhost",
            "port": 5672,
            "vhost": "/",
        },
        "queues": {"my_queue": {"settings": {"durable": True}, "limit": 0}},
        "queue_limits": {0: ["my_queue"], 1: ["my_other_queue"]},
        "notifiers": {
            "smtp": {
                "host": "localhost",
                "user": None,
                "passwd": None,
                "from_addr": "[email protected]",      # placeholder address
                "to_addr": ["[email protected]"],  # placeholder address
                "subject": "AMQPeek - RMQ Monitor",
            },
            "slack": {"api_key": "apikey", "username": "ampeek", "channel": "#general"},
        },
    }
5,344,875
def write_layers(layers):
    """Write `layers` to `layers_file`, pickling or writing an image file based on the extension."""
    ext = os.path.splitext(layers_file)[1]
    if ext == '.pkl':
        # pickle
        pickle.dump(layers, open(layers_file, 'wb'))
    else:
        # write image file
        layers.data = numpy.asarray(layers.data, dtype=layers_data_type)
        layers.write(file=layers_file)
5,344,876
def copy_linear(net, net_old_dict): """ Copy linear layers stored within net_old_dict to net. """ net.linear.weight.data = net_old_dict["linears.0.weight"].data net.linear.bias.data = net_old_dict["linears.0.bias"].data return net
5,344,877
async def read_book(request: Request) -> dict:
    """Record a reader-book entry and return the new record id."""
    data = await request.json()
    query = readers_books.insert().values(**data)
    last_record_id = await database.execute(query)
    return {"id": last_record_id}
5,344,878
def setup_dev(): """Runs the set-up needed for local development.""" return setup_general()
5,344,879
def countAllAnnotationLines( mpqa_dir="mpqa_dataprocessing\\database.mpqa.cleaned", doclist_filename='doclist.2.0' ): """ It counts all annotation lines available in all documents of a corpus. :return: an integer """ m2d = mpqa2_to_dict(mpqa_dir=mpqa_dir) mpqadict = m2d.corpus_to_dict(doclist_filename=doclist_filename) count = 0 for doc in mpqadict['doclist']: # Iterate over all docs count += len(mpqadict['docs'][doc]['annotations'].keys()) return count
5,344,880
def get_article_score(token, article_id):
    """
    View an article
    :param article_id:
    :param token:
    :return:
    """
    data = {
        "id": article_id,
        "token": token
    }
    try:
        res = requests.post("https://information.17wanxiao.com/cms/api/info/detail",
                            data=data).json()
        if res['result_']:
            # log.info('Viewed the article successfully')
            pass
        else:
            log.warning(f'Failed to view the article: {res}')
    except Exception as e:
        log.warning(f'Failed to view the article: {e}')
5,344,881
def convergence(report: Report, **kwargs):
    """
    Function that displays the convergence using an antco.report.Report object.

    Parameters
    ----------
    report: antco.report.Report
        antco.report.Report instance returned by the antco.run() function.

    **kwargs
        figsize: tuple, default=(8, 5)
            Tuple indicating the size of the figure.
        title: str, default='Convergence'
            Plot title.
        alpha_grid: float, default=0.2
            Transparency of the grid lines of the plot.
        alpha_graph: float, default=0.7
            Transparency of the lines of the plot.
        save_plot: str, default=None
            File in which to save the generated graph, if no value is provided the graph will
            not be saved.

    Returns
    -------
    :matplotlib.pyplot.Fig
        Figure with convergence graph.
    """
    def _draw(ax_, params_: dict, alpha_: float, color_: str, label_: str, linestyle_: str,
              linewidth_: int):
        x = np.arange(len(params_))
        y = [np.mean(vals) for vals in params_.values()]
        ax_.plot(x, y, color=color_, label=label_, alpha=alpha_, linestyle=linestyle_,
                 linewidth=linewidth_)
        return ax_

    # Check that the parameters necessary to represent convergence can be obtained.
    try:
        report.get('mean_cost')
    except Exception:
        raise Exception(
            'The Report instance does not have the "mean_cost" value, make sure you have saved the "mean_cost" value '
            'throughout the iterations of the algorithm using the method report.save("mean_cost").')
    try:
        report.get('max_cost')
    except Exception:
        raise Exception(
            'The Report instance does not have the "max_cost" value, make sure you have saved the "max_cost" value '
            'throughout the iterations of the algorithm using the method report.save("max_cost").')

    parameters = {
        'mean_cost': {'color': '#85C1E9', 'label': 'Average cost', 'linestyle': 'solid', 'linewidth': 3},
        'max_cost': {'color': '#AF7AC5', 'label': 'Max cost', 'linestyle': 'dashed', 'linewidth': 2}}

    # Get optional arguments
    figsize = kwargs.get('figsize', (8, 5))
    title = kwargs.get('title', 'Convergence')
    alpha_graph = kwargs.get('alpha_graph', 0.7)
    alpha_grid = kwargs.get('alpha_grid', 0.2)
    save_plot = kwargs.get('save_plot', None)

    fig, ax = plt.subplots(figsize=figsize)

    for param, values in parameters.items():
        ax = _draw(ax, report.get(param), alpha_graph, values['color'], values['label'],
                   values['linestyle'], values['linewidth'])

    ax.set_xlabel('Iteration')
    ax.set_ylabel('Cost')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(alpha=alpha_grid)
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), fancybox=True, shadow=True,
              ncol=len(parameters))
    ax.set_title(title)

    if save_plot is not None:
        plt.savefig(save_plot, dpi=150)

    return fig
5,344,882
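A sketch of the data shape `convergence` expects from the report: `report.get('mean_cost')` and `report.get('max_cost')` must be dict-like, mapping iteration -> iterable of values, since `_draw` averages each entry. The stand-in class below is hypothetical and only illustrates that contract; it assumes convergence, numpy and matplotlib are importable:

class FakeReport:
    """Minimal stand-in for antco.report.Report (illustrative only)."""
    def __init__(self):
        self._values = {
            'mean_cost': {i: [0.5 + 0.01 * i] for i in range(50)},
            'max_cost':  {i: [0.7 + 0.005 * i] for i in range(50)},
        }

    def get(self, key):
        return self._values[key]

fig = convergence(FakeReport(), title='Toy convergence', figsize=(6, 4))
fig.savefig('toy_convergence.png', dpi=120)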
def scene(): """ Check that the scene is valid for submission and creates a report """ xrs.validation_report.new_report() valid = True # Start by getting into object mode with nothing selected bpy.ops.object.mode_set(mode="OBJECT") bpy.ops.object.select_all(action='DESELECT') if (xrs.collection.collection_has_objects("master") == False): xrs.validation_report.write_error("master collection not found or has no objects") valid = False else: xrs.validation_report.write_ok("master collection found") if (xrs.collection.collection_has_objects("web") == False): valid = False xrs.validation_report.write_error( "web collection not found or has no objects" ) else: # Check all objects in the web collection web_objects = xrs.collection.get_objects("web") base_objects = xrs.collection.get_objects("master") transparent_object_count = 0 total_triangles = 0 xrs.validation_report.write_ok( "web collection found. object count: " + str(len(web_objects)) ) xrs.validation_report.write_hr() # TODO: Additional checks for master objects if ('dimensions_cube' not in bpy.data.objects): valid = False xrs.validation_report.write_error( "dimensions_cube not found" ) else: dimensions_cube = bpy.data.objects['dimensions_cube'] tolerance = 1.05 web_dimensions = xrs.collection.get_dimensions("web") # (WARN) Width if ( web_dimensions[0] > dimensions_cube.dimensions.x * tolerance ): xrs.validation_report.write_warning( "Model width is too big (" + str(web_dimensions[0]) + " > " + str(dimensions_cube.dimensions.x) + ")" ) elif ( web_dimensions[0] < dimensions_cube.dimensions.x / tolerance ): xrs.validation_report.write_warning( "Model width is too small (" + str(web_dimensions[0]) + " < " + str(dimensions_cube.dimensions.x) + ")" ) else: xrs.validation_report.write_ok( "Model width is " + str(web_dimensions[0]) ) # (WARN) Depth if ( web_dimensions[1] > dimensions_cube.dimensions.y * tolerance ): xrs.validation_report.write_warning( "Model depth is too big (" + str(web_dimensions[1]) + " > " + str(dimensions_cube.dimensions.y) + ")" ) elif ( web_dimensions[1] < dimensions_cube.dimensions.y / tolerance ): xrs.validation_report.write_warning( "Model depth is too small (" + str(web_dimensions[1]) + " < " + str(dimensions_cube.dimensions.y) + ")" ) else: xrs.validation_report.write_ok( "Model depth is " + str(web_dimensions[1]) ) # (WARN) Height if ( web_dimensions[2] > dimensions_cube.dimensions.z * tolerance ): xrs.validation_report.write_warning( "Model height is too big (" + str(web_dimensions[2]) + " > " + str(dimensions_cube.dimensions.z) + ")" ) elif ( web_dimensions[2] < dimensions_cube.dimensions.z / tolerance ): xrs.validation_report.write_warning( "Model height is too small (" + str(web_dimensions[2]) + " < " + str(dimensions_cube.dimensions.z) + ")" ) else: xrs.validation_report.write_ok( "Model height is " + str(web_dimensions[2]) ) xrs.validation_report.write_hr() # Base Collection for obj in base_objects: # (ERR) Modifiers need to be applied if (len(obj.modifiers) > 0): valid = False xrs.validation_report.write_error( obj.name + " needs to have all modifiers applied" ) else: xrs.validation_report.write_ok( obj.name + " has no modifiers" ) # (ERR) Transforms Not Applied (loc!=0,0,0;rot!=0,0,0;scale!=1) if (xrs.object.transforms_are_applied(obj) == False): valid = False xrs.validation_report.write_error( obj.name + " needs to have transforms applied" ) else: xrs.validation_report.write_ok( obj.name + " transforms are correct" ) # Web Collection for obj in web_objects: # (ERR) Modifiers need to be applied if 
(len(obj.modifiers) > 0): valid = False xrs.validation_report.write_error( obj.name + " needs to have all modifiers applied" ) else: xrs.validation_report.write_ok( obj.name + " has no modifiers" ) # (ERR) Non-manifold geometry non_manifold_vertext_count = xrs.object.non_manifold_vertex_count(obj) if (non_manifold_vertext_count > 0): xrs.validation_report.write_warning( obj.name + " has non-manifold geometry (" + str(non_manifold_vertext_count) + " vertices)" ) else: xrs.validation_report.write_ok( obj.name + " has no non-manifold geometry" ) # (ERR) Triangle count over 100k triangle_count = xrs.object.get_triangle_count(obj) total_triangles = total_triangles + triangle_count if (triangle_count > 100000): valid = False xrs.validation_report.write_error( obj.name + " has " + str(triangle_count) + " triangles. The web collection model must be less than 100,000 triangles" ) # (WARN) Triangle count over 60k if (triangle_count > 60000): xrs.validation_report.write_warning( obj.name + " has " + str(triangle_count) + " triangles. This web collection model should be optimized where possible" ) else: xrs.validation_report.write_ok( obj.name + " has " + str(triangle_count) + " triangles" ) # (ERR) Transforms Not Applied (loc!=0,0,0;rot!=0,0,0;scale!=1) if (xrs.object.transforms_are_applied(obj) == False): valid = False xrs.validation_report.write_error( obj.name + " needs to have transforms applied" ) else: xrs.validation_report.write_ok( obj.name + " transforms are correct" ) # (WARN) Object names match mesh names if (obj.name != obj.data.name): xrs.validation_report.write_warning( obj.name + " mesh is named " + obj.data.name + ". Names should match" ) else: xrs.validation_report.write_ok( obj.name + " mesh name matches" ) # Materials material_count = len(obj.material_slots) # (ERR) No material if (material_count == 0): valid = False xrs.validation_report.write_error( obj.name + " needs to have a material" ) else: # (WARN) >1 Material per web object if (material_count > 1): xrs.validation_report.write_warning( obj.name + " has " + str(material_count) + " materials and should only have 1" ) else: xrs.validation_report.write_ok( obj.name + " has 1 material" ) #web collection should have ao if xrs.tools.check_ao() == False: xrs.validation_report.write_warning( obj.name + " does not have an AO map. 
Please make one for the web collection model and put in the textures folder" ) else: xrs.validation_report.write_ok( obj.name + " has the needed AO map in the web collection" ) # (ERR) Material names are correct for slot in obj.material_slots: mat = slot.material # (ERR) Empty Material Slot if (mat is None): valid = False xrs.validation_report.write_error( obj.name + " has an empty material slot, which must be removed" ) else: # (WARN) Material name should be lower case if (mat.name.islower() == False): xrs.validation_report.write_warning( mat.name + " name should be lower case with _s" ) else: xrs.validation_report.write_ok( mat.name + " name is valid" ) # (ERR) Material uses nodes if (mat.use_nodes == False): valid = False xrs.validation_report.write_error( mat.name + " needs to use nodes" ) else: xrs.validation_report.write_ok( mat.name + " uses nodes" ) # (ERR) Material does not use a BSDF Shader bsdf = xrs.material.get_one_node_of_type( mat.node_tree.nodes, "BSDF_PRINCIPLED" ) if (bsdf is None): valid = False xrs.validation_report.write_error( mat.name + " needs to use a Principled BSDF shader" ) else: xrs.validation_report.write_ok( mat.name + " has a Principled BSDF shader" ) # Base Color if (check_and_report_material( bsdf, mat, "Base Color", "diffuse" ) == False): valid = False # Metallic (TODO: enforce 0 or 1) if (check_and_report_material( bsdf, mat, "Metallic", "metallic" ) == False): valid = False # Roughness if (check_and_report_material( bsdf, mat, "Roughness", "roughness" ) == False): valid = False # Emission if (check_and_report_material( bsdf, mat, "Emission", "emissive" ) == False): valid = False # Alpha (TODO: get count) # Alpha is in the diffuse texture if (check_and_report_material( bsdf, mat, "Alpha", "diffuse" ) == False): valid = False # Normal if (check_and_report_normal_material( bsdf, mat ) == False): valid = False xrs.validation_report.write_hr() # (WARN) web object count should only be > 1 if some are transparent #TODO # (WARN) Total triangle count for web collection if (total_triangles > 65000): xrs.validation_report.write_warning( "web collection meshes have " + str(total_triangles) + " triangles. There should be less than 65,000 triangles where possible" ) # Nice to have: # Preview Render # GLB export # Show the report after it is complete xrs.validation_report.show_report() return valid
5,344,883
def ca_set_container_policies(h_session, h_container, policies): """ Set multiple container policies. :param int h_session: Session handle :param h_container: target container handle :param policies: dict of policy ID ints and value ints :return: result code """ h_sess = CK_SESSION_HANDLE(h_session) container_id = CK_ULONG(h_container) pol_id_list = list(policies.keys()) pol_val_list = list(policies.values()) pol_ids = AutoCArray(data=pol_id_list, ctype=CK_ULONG) pol_vals = AutoCArray(data=pol_val_list, ctype=CK_ULONG) ret = CA_SetContainerPolicies( h_sess, container_id, pol_ids.size.contents, pol_ids.array, pol_vals.array ) return ret
5,344,884
def update_records() -> None: """ Updates the records page. """ records = load_file("records") times, people = records["records"], records["people"] # If graduated, add to alumni list graduated = [person for person in people if datetime.now() > summer(person[-1])] if len(graduated) > 0: alumni = load_file("alumni") # first entry is WCA profile url, second is name dump_file(alumni + [person[1] for person in graduated], "alumni") # Remove alumni people = [person for person in people if datetime.now() < summer(person[-1])] for url, name, year in people: prs = wca_profile(url) for event in prs: # PRs can only get better so remove old PR if it exists for mode in ["single", "average"]: times[event][mode] = [tuple(time) for time in times[event][mode] if name not in time] times[event][mode].append((prs[event][mode], name)) # More points is better times[event][mode].sort(reverse=event == "3x3x3 Multi-Blind") if times[event][mode][0][1] == name: cat = mode[0] + "ranks" for rank in ["nr", "cr", "wr"]: times[event][cat][rank] = prs[event][cat][rank] dump_file({"records": times, "people": people, "time": time.time()}, "records")
5,344,885
def run_trial(benchmark): """Runs the benchmark once and returns the elapsed time.""" args = ['.build/debug/slox', join('test', 'benchmark', benchmark + '.lox')] proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE) out, err = proc.communicate() out = out.decode("utf-8").replace('\r\n', '\n') # Remove the trailing last empty line. out_lines = out.split('\n') if out_lines[-1] == '': del out_lines[-1] # The benchmark should print the elapsed time last. return float(out_lines[-1])
5,344,886
def extend(arr, num=1, log=True, append=False): """Extend the given array by extraplation. Arguments --------- arr <flt>[N] : array to extend num <int> : number of points to add (on each side, if ``both``) log <bool> : extrapolate in log-space append <bool> : add the extended points onto the given array Returns ------- retval <flt>[M] : extension (or input ``arr`` with extension added, if ``append``). """ if(log): useArr = np.log10(arr) else: useArr = np.array(arr) steps = np.arange(1, num+1) left = useArr[0] + (useArr[0] - useArr[1])*steps[::-1].squeeze() rigt = useArr[-1] + (useArr[-1] - useArr[-2])*steps.squeeze() if(log): left = np.power(10.0, left) rigt = np.power(10.0, rigt) if(append): return np.hstack([left, arr, rigt]) return [left, rigt]
5,344,887
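A quick worked example of the log-space extrapolation, assuming the function above and numpy are importable: for a decade-spaced array the extension simply continues the decades on both sides.

import numpy as np

arr = np.array([1.0, 10.0, 100.0])
left, right = extend(arr, num=2, log=True)
# In log10 space the spacing is 1.0, so two extra points per side:
assert np.allclose(left, [0.01, 0.1])
assert np.allclose(right, [1000.0, 10000.0])

full = extend(arr, num=2, log=True, append=True)
assert np.allclose(full, [0.01, 0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0])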
def to_relative(path, root, relative): """Converts any absolute path to a relative path, only if under root.""" if sys.platform == 'win32': path = path.lower() root = root.lower() relative = relative.lower() if path.startswith(root): logging.info('%s starts with %s' % (path, root)) path = os.path.relpath(path, relative) else: logging.info('%s not under %s' % (path, root)) return path
5,344,888
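An illustrative call on a POSIX-style layout (paths are made up, and the function is assumed importable): when `path` is under `root` it is re-expressed relative to `relative`, otherwise it is returned unchanged.

# Under root: rewritten relative to the build directory.
to_relative('/home/user/proj/src/a.py', '/home/user/proj', '/home/user/proj/build')
# -> '../src/a.py'

# Not under root: returned as-is (only a log message is emitted).
to_relative('/tmp/other.py', '/home/user/proj', '/home/user/proj/build')
# -> '/tmp/other.py'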
def do_associate_favorite(parser, token):
    """
    @object - object to return the favorite count for
    """
    try:
        tag, node, user = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires two arguments" % token.contents.split()[0])
    return AssociateFavorite(node, user)
5,344,889
def test_module_in_place(testdir): """Make sure the run in place option works""" # copy_example copies what is IN the given directory, not the directory itself... testdir.copy_example("testmodule") # Moving module to make sure we can run in place, # by making sure the module name in module.yml is in it's parent path os.mkdir("testmodule") shutil.move("model", "testmodule/model") shutil.move("module.yml", "testmodule/module.yml") shutil.move("plugins", "testmodule/plugins") shutil.move("tests", "testmodule/tests") os.chdir("testmodule") path = os.getcwd() assert not os.path.exists(os.path.join(path, "testfile")) pytest_inmanta.plugin.CURDIR = path result = testdir.runpytest("tests/test_location.py", "--use-module-in-place") result.assert_outcomes(passed=1) assert os.path.exists(os.path.join(path, "testfile"))
5,344,890
def parse_cookie(cookie: Type[BaseModel]) -> Tuple[List[Parameter], dict]: """Parse cookie model""" schema = get_schema(cookie) parameters = [] components_schemas = dict() properties = schema.get('properties') definitions = schema.get('definitions') if properties: for name, value in properties.items(): data = { "name": name, "in": ParameterInType.cookie, "description": value.get("description"), "required": name in schema.get("required", []), "schema": Schema(**value) } parameters.append(Parameter(**data)) if definitions: for name, value in definitions.items(): components_schemas[name] = Schema(**value) return parameters, components_schemas
5,344,891
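parse_cookie consumes the JSON schema that pydantic generates for the model. A short sketch of that input side only, using pydantic v1-style .schema(); the model below is hypothetical and the printed values are approximate:

from pydantic import BaseModel, Field

class TrackingCookie(BaseModel):
    session_id: str = Field(..., description="Opaque session identifier")
    theme: str = "light"

schema = TrackingCookie.schema()
print(schema["required"])             # ['session_id']
print(schema["properties"]["theme"])  # {'title': 'Theme', 'default': 'light', 'type': 'string'}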
def p_cast_operator(p): """ value : LEFT_PARENTHESIS value RIGHT_PARENTHESIS value %prec CCAST """ p[0] = '%s%s%s%s' % (p[1], p[2], p[3], p[4])
5,344,892
def bdev_nvme_add_error_injection(client, name, opc, cmd_type, do_not_submit, timeout_in_us, err_count, sct, sc): """Add error injection Args: name: Name of the operating NVMe controller opc: Opcode of the NVMe command cmd_type: Type of NVMe command. Valid values are: admin, io do_not_submit: Do not submit commands to the controller timeout_in_us: Wait specified microseconds when do_not_submit is true err_count: Number of matching NVMe commands to inject errors sct: NVMe status code type sc: NVMe status code Returns: True on success, RPC error otherwise """ params = {'name': name, 'opc': opc, 'cmd_type': cmd_type} if do_not_submit: params['do_not_submit'] = do_not_submit if timeout_in_us: params['timeout_in_us'] = timeout_in_us if err_count: params['err_count'] = err_count if sct: params['sct'] = sct if sc: params['sc'] = sc return client.call('bdev_nvme_add_error_injection', params)
5,344,893
def tetheredYN(L0, KxStar, Rtot, Kav, fully=True): """ Compare tethered (bispecific) vs monovalent """ if fully: return polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[2][0] / \ polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0] else: return polyc(L0, KxStar, Rtot, [[1, 1]], [1.0], Kav)[0][0] / \ polyfc(L0 * 2, KxStar, 1, Rtot, [0.5, 0.5], Kav)[0]
5,344,894
def __add_docker_image(aws_session, compose_config, service_name, tag): """ Adds the Docker image to a service in a docker-compose config. The image is only added if an existing image doesn't exist for the service. :param aws_session: The AWS session. :param compose_config: The docker-compose config being modified. :param service_name: The name of the service. :param tag: The tag to give the service. :return: """ if 'image' not in compose_config['services'][service_name]: url = __get_docker_image(aws_session, service_name, tag) __add_service(compose_config, service_name) compose_config['services'][service_name]['image'] = url
5,344,895
def test_timestamp_spacing_one_timestamp(times): """An index with only one timestamp has uniform spacing.""" assert_series_equal( time.spacing(times[[0]], times.freq), pd.Series(True, index=[times[0]]) )
5,344,896
def valid_payload(request): """ Fixture that yields valid data payload values. """ return request.param
5,344,897
def lookup_content(path, source_id): """ Look for a filename in the form of: ARCHIVE_SOURCEID.[extension] """ content_filename = None files = [f for f in os.listdir(path) if not f.endswith(".xml")] for f in files: tokens = os.path.splitext(f)[0].split("_") if len(tokens) == 0: continue if tokens[-1] == source_id: log.info("Content file FOUND: {0}", f) # content_path = os.path.join(path, f) content_filename = f break return content_filename
5,344,898
def external(pgm, inp, out, cor, tim=5): """ The external checker is used to check for outputs using an external program that reads the input and the generated output and writes to stdout the veredict. If the program runs for more than tim seconds, 'IE' is returned. 'IE' also returned for non-existing pgm. """ if not util.file_exists(pgm): return 'IE' tmp = util.tmp_file() pid = os.fork() if pid == 0: # Child os.system('./%s %s %s %s > %s' % (pgm, inp, out, cor, tmp)) os._exit(0) else: # Parent c = 0 while c <= tim: ret = os.waitpid(pid, os.WNOHANG) if ret[0] != 0: # Ok! ver = util.read_file(tmp).strip() util.del_file(tmp) return ver time.sleep(0.1) c += 0.1 os.kill(pid, signal.SIGKILL) return 'IE'
5,344,899
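A simpler sketch of the same external-checker pattern using subprocess.run with a timeout instead of fork/waitpid polling; this is not the original implementation, and the file names are placeholders:

import os
import subprocess

def external_check(pgm, inp, out, cor, tim=5):
    """Run ./pgm inp out cor and return its stdout as the verdict, or 'IE' on failure."""
    if not os.path.isfile(pgm):
        return 'IE'
    try:
        proc = subprocess.run(
            ['./' + pgm, inp, out, cor],
            capture_output=True, text=True, timeout=tim,
        )
    except subprocess.TimeoutExpired:
        return 'IE'
    return proc.stdout.strip()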