content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
def extract_unii_other_code(tree):
    """Extract the codes for other ingredients"""
    unii_other_xpath = \
        '//generalizedMaterialKind/code[@codeSystem="%s"]/@code' % UNII_OTHER_OID
    return tree.getroot().xpath(unii_other_xpath)
5,350,800
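A minimal usage sketch for extract_unii_other_code above, assuming UNII_OTHER_OID is the FDA UNII code-system OID (the value below is illustrative) and that tree is an lxml ElementTree:

from lxml import etree

UNII_OTHER_OID = "2.16.840.1.113883.4.9"  # assumed value, for illustration only

xml = b"""
<document>
  <generalizedMaterialKind>
    <code code="ABC123XYZ" codeSystem="2.16.840.1.113883.4.9"/>
  </generalizedMaterialKind>
</document>
"""

tree = etree.ElementTree(etree.fromstring(xml))
print(extract_unii_other_code(tree))  # ['ABC123XYZ']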
def initfunc(f):
    """
    Decorator for initialization functions that should be run exactly once.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if wrapper.initialized:
            return
        wrapper.initialized = True
        return f(*args, **kwargs)
    wrapper.initialized = False
    return wrapper
5,350,801
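A quick sketch of how the initfunc decorator above behaves; the decorated function body is made up for illustration:

import functools

@initfunc
def setup_cache():
    print("cache initialized")

setup_cache()  # prints "cache initialized" and flips wrapper.initialized to True
setup_cache()  # returns immediately; the body is not run a second time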
def edit_frame(frame):
    """Edit a frame into an analyzable frame:
    RGB to gray, threshold the frame, then bitwise-invert the result.

    Args:
        frame (ndarray): original frame from movie
    Returns:
        work_frame (ndarray): edited frame
    """
    work_frame = frame
    work_frame = cv2.cvtColor(work_frame, cv2.COLOR_RGB2GRAY)
    work_frame = cv2.threshold(work_frame, FRAME_THRESH, 255, cv2.THRESH_BINARY)[1]
    work_frame = cv2.bitwise_not(work_frame)
    return work_frame
5,350,802
def manchester(bin_string):
    """
    Applies the Manchester technique to a string of bits.

    :param bin_string:
    :type bin_string: str
    :return:
    :rtype: str
    """
    signal_manager = Signal()
    for bin_digit in bin_string:
        if bin_digit == '0':
            # Generate +-
            if signal_manager.signal == '+':  # It's positive
                signal_manager.keep()  # +
                signal_manager.flip()  # -
            else:  # It's negative
                signal_manager.flip()  # +
                signal_manager.flip()  # -
        else:
            # Generate -+
            if signal_manager.signal == '+':  # It's positive
                signal_manager.flip()  # -
                signal_manager.flip()  # +
            else:  # It's negative
                signal_manager.keep()  # -
                signal_manager.flip()  # +
    return str(signal_manager)
5,350,803
def load_admin_cells(identifier: str) -> List[MultiPolygon]:
    """Loads the administrative region cells

    Data is loaded from :py:const:`ADMIN_GEOJSON_TEMPLATE` ``% identifier``.
    This is a wrapper function for :py:func:`load_polygons_from_json`.

    Returns:
        A list of the administrative region cells.
    """
    return load_polygons_from_json(ADMIN_GEOJSON_TEMPLATE % identifier)
5,350,804
def register_writer(format, cls=None): """Return a decorator for a writer function. A decorator factory for writer functions. A writer function should have at least the following signature: ``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always** an open filehandle. This decorator provides the ability to use filepaths in the same argument position as `fh`. They will automatically be opened and closed. **The writer must not close the filehandle**, cleanup will be handled external to the reader and is not its concern. Any additional `**kwargs` will be passed to the writer and may be used if necessary. The writer must not return a value. Instead it should only mutate the `fh` in a way consistent with it's purpose. If the writer accepts a generator, it should exhaust the generator to ensure that the potentially open filehandle backing said generator is closed. .. note:: Failure to adhere to the above interface specified for a writer will result in unintended side-effects. Parameters ---------- format : str A format name which a decorated writer will be bound to. cls : type, optional The class which a decorated writer will be bound to. If `cls` is None the writer will be bound as expecting a generator. Default is None. Returns ------- function A decorator to be used on a writer. The decorator will raise a ``skbio.io.DuplicateRegistrationError`` if there already exists a *writer* bound to the same permutation of `fmt` and `cls`. See Also -------- skbio.io.write skbio.io.get_writer """ def decorator(writer): format_class = _formats.setdefault(format, {}).setdefault(cls, {}) if 'writer' in format_class: raise DuplicateRegistrationError('writer', format, cls) file_args = [] writer_spec = inspect.getargspec(writer) if writer_spec.defaults is not None: # Concept from http://stackoverflow.com/a/12627202/579416 for key, default in zip( writer_spec.args[-len(writer_spec.defaults):], writer_spec.defaults): if default is FileSentinel: file_args.append(key) # We wrap the writer so that basic file handling can be managed # externally from the business logic. def wrapped_writer(obj, fp, mode='w', **kwargs): file_keys = [] files = [fp] for file_arg in file_args: if file_arg in kwargs: if kwargs[file_arg] is not None: file_keys.append(file_arg) files.append(kwargs[file_arg]) else: kwargs[file_arg] = None with open_files(files, mode) as fhs: for key, fh in zip(file_keys, fhs[1:]): kwargs[key] = fh writer(obj, fhs[0], **kwargs) wrapped_writer.__doc__ = writer.__doc__ wrapped_writer.__name__ = writer.__name__ format_class['writer'] = wrapped_writer return wrapped_writer return decorator
5,350,805
def get_downloadpath(user_id):
    """
    find the download path
    """
    path = settings.DOCUMENT_PATH + str(user_id) + '/'
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
5,350,806
def lerp(a: mat33, b: mat33, t: float32) -> mat33:
    """
    Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
    """
    ...
5,350,807
def genetic_fit_call(fit_directory):
    """genetic_fit_call(fit_directory)

    Run a casm genetic algorithm fit.
    Assumes that the fit settings file already exists.

    Args:
        fit_directory (str): absolute path to the current genetic fit directory.

    Returns:
        none.
    """
    os.chdir(fit_directory)
    print("Removing old data for individual 0")
    os.system(
        "rm check.0; rm checkhull_genetic_alg_settings_0_*; rm genetic_alg_settings_*"
    )
    print("Running new fit")
    os.system("casm-learn -s genetic_alg_settings.json > fit.out")
    print("Writing data for individual 0")
    os.system("casm-learn -s genetic_alg_settings.json --checkhull --indiv 0 > check.0")
5,350,808
def accuracy(output, target, topk=1,axis=1,ignore_index=-100, exclude_mask=False): """Computes the precision@k for the specified values of k prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) """ input_tensor=output.copy().detach() target_tensor=target.copy().detach() num_classes = int_shape(output)[axis] if len(input_tensor)==0: return to_tensor(0.0) is_logsoftmax = None from_logits = None output_exp = exp(input_tensor) if (ndim(input_tensor) >= 1 and 'float' in str(input_tensor.dtype) and input_tensor.min() >= 0 and input_tensor.max() <= 1): is_logsoftmax = False from_logits = True input_tensor = clip(input_tensor, min=1e-8, max=1 - 1e-8) elif (ndim(output_exp) >= 1 and 'float' in str(output_exp.dtype) and output_exp.min() >= 0 and output_exp.max() <= 1): is_logsoftmax = True from_logits = True input_tensor = clip(output_exp, min=1e-8, max=1 - 1e-8) else: is_logsoftmax = False from_logits = False if input_tensor.dtype!=torch.int64 and topk==1: if len(input_tensor.size())==1: #binary input_tensor=input_tensor.gt(0.5).float() else: input_tensor=argmax(input_tensor,axis).squeeze() if target_tensor.dtype!=torch.int64: target_tensor=argmax(target_tensor,axis).squeeze() if input_tensor.shape!=target_tensor.shape and topk==1: raise ValueError('input shape {0} is not competable with target shape {1}'.format(input_tensor.shape,target_tensor.shape)) input_mask=ones_like(input_tensor) if isinstance(ignore_index, int) and 0 <= ignore_index < num_classes: input_mask[input_tensor==ignore_index] = 0 elif isinstance(ignore_index, (list, tuple)): for idx in ignore_index: if isinstance(idx, int) and 0 <= idx < int_shape(output)[axis]: input_mask[input_tensor == idx] = 0 batch_size = target_tensor.size(0) if topk==1: return (input_tensor.eq(target_tensor).float()*input_mask).sum()/clip((input_mask).float().sum(),min=1) else: _, pred = input_tensor.topk(topk) pred = pred.t() correct = pred.eq(target_tensor.reshape((1, -1)).expand_as(pred)) correct_k = reduce_sum(correct[:topk].reshape(-1).float(),axis=0,keepdims=True) return correct_k.mul_(1 / batch_size)
5,350,809
def jitter_over_thresh(x: xr.DataArray, thresh: str, upper_bnd: str) -> xr.DataArray:
    """Replace values greater than threshold by a uniform random noise.

    Do not confuse with R's jitter, which adds uniform noise instead of replacing values.

    Parameters
    ----------
    x : xr.DataArray
        Values.
    thresh : str
        Threshold over which to add uniform random noise to values, a quantity with units.
    upper_bnd : str
        Maximum possible value for the random noise, a quantity with units.

    Returns
    -------
    xr.DataArray

    Notes
    -----
    If thresh is low, this will change the mean value of x.
    """
    return jitter(x, lower=None, upper=thresh, minimum=None, maximum=upper_bnd)
5,350,810
def index_set_names(index, names, level=None, inplace=False): """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label Name(s) to set. level : int, label or list of int or label, optional If the index is a MultiIndex, level(s) to set (None for all levels). Otherwise level must be None. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index The same type as the caller or None if inplace is True. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> import mars.dataframe as md >>> idx = md.Index([1, 2, 3, 4]) >>> idx.execute() Int64Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter').execute() Int64Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = md.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx.execute() MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx.set_names(['kind', 'year'], inplace=True) >>> idx.execute() MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.set_names('species', level=0).execute() MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) """ op = DataFrameRename(index_mapper=names, level=level, output_types=get_output_types(index)) ret = op(index) if inplace: df_or_series = getattr(index, '_get_df_or_series', lambda: None)() if df_or_series is not None: from .rename_axis import rename_axis_with_level rename_axis_with_level(df_or_series, names, axis=index._axis, level=level, inplace=True) index.data = df_or_series.axes[index._axis].data else: index.data = ret.data else: return ret
5,350,811
def test_SingleScaler_combine_intensities():
    """test combine intensities method"""
    p, e, r = (generated_param(), generated_exp(), generated_refl_for_comb())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)

    scaler.combine_intensities()

    # The input makes the profile intensities best - so check these are set in the
    # reflection table and global_Ih_table
    assert list(scaler.reflection_table["intensity"]) == list(r["intensity.prf.value"])
    assert list(scaler.reflection_table["variance"]) == list(
        r["intensity.prf.variance"]
    )
    block = scaler.global_Ih_table.blocked_data_list[0]
    block_sel = block.block_selections[0]
    suitable = scaler.suitable_refl_for_scaling_sel
    assert list(block.intensities) == list(
        scaler.reflection_table["intensity"].select(suitable).select(block_sel)
    )
    assert list(block.variances) == list(
        scaler.reflection_table["variance"].select(suitable).select(block_sel)
    )
5,350,812
def get_critic(obs_dim: int) -> tf.keras.Model:
    """Get a critic that returns the expected value for the current state"""
    observation = tf.keras.Input(shape=(obs_dim,), name='observation')
    x = layers.Dense(64, activation='tanh')(observation)
    x = layers.Dense(64, activation='tanh')(x)
    value = layers.Dense(1, name='value')(x)

    critic = tf.keras.Model(observation, value)
    # critic.summary()
    return critic
5,350,813
def register_and_login_test_user(c):
    """
    Helper function that makes an HTTP request to register a test user

    Parameters
    ----------
    c : object
        Test client object

    Returns
    -------
    str
        Access JWT in order to use in subsequent tests
    """
    c.post(
        "/api/auth/register",
        json={
            "username": "test",
            "password": "secret",
            "first_name": "tim",
            "last_name": "apple",
            "email": "[email protected]",
            "birthday": "1990-01-01",
        },
    )

    setup_resp = c.post(
        "/api/auth/login", json={"username": "test", "password": "secret"}
    )
    setup_resp_json = setup_resp.get_json()
    setup_access_token = setup_resp_json["access_token"]

    return setup_access_token
5,350,814
def get_ssh_challenge_token(account, appid, ip=None, vo='def'):
    """
    Get a challenge token for subsequent SSH public key authentication.

    The challenge token lifetime is 5 seconds.

    :param account: Account identifier as a string.
    :param appid: The application identifier as a string.
    :param ip: IP address of the client as a string.
    :param vo: The VO to act on.
    :returns: A dict with token and expires_at entries.
    """
    kwargs = {'account': account}
    if not permission.has_permission(issuer=account, vo=vo, action='get_ssh_challenge_token', kwargs=kwargs):
        raise exception.AccessDenied('User can not get challenge token for account %s' % account)

    account = InternalAccount(account, vo=vo)

    return authentication.get_ssh_challenge_token(account, appid, ip)
5,350,815
def rootpath_capacity_exceeded(rootpath, newSize):
    """
    Return True if rootpath is already allocated to the extent that it cannot
    accommodate newSize, otherwise return False
    """
    vols_in_rootpath = Volume.objects.filter(root_path=rootpath)
    rootpathallocsum = 0
    if vols_in_rootpath.count() > 0:
        rootpathallocsum = vols_in_rootpath.aggregate(
            alSize=db.models.Sum('size_GB'))['alSize']
    if rootpathallocsum + newSize > rootpath.capacity_GB:
        return True
    return False
5,350,816
def user_token(user: str) -> str:
    """
    Authorize this request with the GitHub app set by the 'app_id' and
    'private_key' environment variables.
    1. Get the installation ID for the user that has installed the app
    2. Request a new token for that user
    3. Return it so it can be used in future API requests
    """
    # Hardcode the installation to PyTorch so we can always get a valid ID key
    id = installation_id("pytorch")
    url = f"https://api.github.com/app/installations/{id}/access_tokens"
    r_bytes = requests.post(url, headers=app_headers())
    r = json.loads(r_bytes.content.decode())
    token = str(r["token"])
    return token
5,350,817
def file_exists(path):
    """
    Return True if the file from the path exists.

    :param path: A string containing the path to a file.
    :return: a boolean - True if the file exists, otherwise False
    """
    return isfile(path)
5,350,818
def _static_to_href(pathto: Callable, favicon: Dict[str, str]) -> Dict[str, str]:
    """If a ``static-file`` is provided, returns a modified version of the icon
    attributes replacing ``static-file`` with the correct ``href``.

    If both ``static-file`` and ``href`` are provided, ``href`` will be ignored.
    """
    if FILE_FIELD in favicon:
        attrs = favicon.copy()
        attrs["href"] = pathto(
            f"{OUTPUT_STATIC_DIR}/{attrs.pop(FILE_FIELD)}", resource=True
        )
        return attrs
    return favicon
5,350,819
def test_IndentList_two():
    """Test indent with two spaces."""
    assert fmt.IndentList(2, ["abc", "d"]) == ["  abc", "  d"]
5,350,820
def concatenate(tensor1, tensor2, axis=0):
    """
    Basically a wrapper for torch.cat, with the exception that tensor2 itself
    is returned if tensor1 is None or evaluates to False.

    :param tensor1: input array or None
    :type tensor1: mixed
    :param tensor2: input array
    :type tensor2: numpy.ndarray
    :param axis: axis to concatenate
    :type axis: int
    :return: concatenated array
    :rtype: numpy.ndarray
    """
    assert isinstance(tensor2, torch.Tensor) or isinstance(tensor2, torch.autograd.Variable)

    if tensor1 is not None:
        assert isinstance(tensor1, torch.Tensor) or isinstance(tensor1, torch.autograd.Variable)
        return torch.cat((tensor1, tensor2), axis=axis)
    else:
        return tensor2
5,350,821
def setKey(key, keytype):
    """
    if keytype is valid, save a copy of key accordingly and check if the key is valid
    """
    global _key, _keytype, FREE_API_KEY, PREMIUM_API_KEY
    keytype = keytype.lower()
    if keytype in ("f", "fr", "free"):
        keytype = "free"
        FREE_API_KEY = key
    elif keytype.startswith("prem") or keytype in ("nonfree", "non-free"):
        keytype = "premium"
        PREMIUM_API_KEY = key
    else:
        print("invalid keytype", keytype)
        return
    oldkey = _key
    oldkeytype = _keytype
    _key = key
    _keytype = keytype
    w = LocalWeather("london")
    # w.data != False rather than w.data to suppress Python 2.7 FutureWarning:
    # "The behavior of this method will change in future versions...."
    if w is not None and hasattr(w, 'data') and w.data is not False:
        return True
    else:
        print("The key is not valid.")
        _key = oldkey
        _keytype = oldkeytype
        return False
5,350,822
def sum_(obj):
    """Sum the values in the given iterable.

    Different from the built-in summation function, the summation is based on
    the first item in the iterable.  Or a SymPy integer zero is created when
    the iterator is empty.
    """
    i = iter(obj)
    try:
        init = next(i)
    except StopIteration:
        return Integer(0)
    else:
        return functools.reduce(operator.add, i, init)
5,350,823
def setup_s3_client(job_data):
    """Creates an S3 client

    Uses the credentials passed in the event by CodePipeline. These
    credentials can be used to access the artifact bucket.

    :param job_data: The job data structure
    :return: An S3 client with the appropriate credentials
    """
    try:
        key_id = job_data['artifactCredentials']['accessKeyId']
        key_secret = job_data['artifactCredentials']['secretAccessKey']
        session_token = job_data['artifactCredentials']['sessionToken']

        session = Session(aws_access_key_id=key_id,
                          aws_secret_access_key=key_secret,
                          aws_session_token=session_token)
    except Exception as e:
        logger.warn('No credentials in artifact - using default role access: {}'.format(e))
        session = Session()

    return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))
5,350,824
def collect_operations(opts): """ Produce a list of operations to take. Each element in the operations list is in the format: (function, (arguments,), 'logging message') """ operations = [] ####################### # Destination directory if os.path.exists(opts.dest_dir): if not opts.force: raise Exception( 'ERROR: The destination directory exists: "%s"\n' 'Use -f or --force option to overwrite the directory.' % opts.dest_dir ) else: operations.append( (shutil.rmtree, (opts.dest_dir,), 'Forcing deletion of existing destination directory "%s"' % opts.dest_dir) ) operations.append( (os.makedirs, (opts.dest_dir,), 'Creating destination directory "%s"' % opts.dest_dir) ) ########################## # Input joshua.config file config_text = opts.config.read() if opts.copy_config_options: config_text = filter_through_copy_config_script( config_text, opts.copy_config_options ) config_lines = config_text.split('\n') ############### # Files to copy # Parse the joshua.config and collect copy operations result_config_lines = [] grammar_configs_count = 0 for i, line in enumerate(config_lines): line_num = i + 1 if line_specifies_grammar(line): try: line, operation = process_line_containing_grammar( line, opts.orig_dir, opts.dest_dir, opts.grammar_paths, grammar_configs_count, opts.symlink, opts.absolute ) except PathException as e: # TODO: make this more appropriate for when the source # path was overridden by a command-line option message = ( # Prepend the line number to the error message 'ERROR: Configuration file "{0}" line {1}: {2}' .format(opts.config.name, line_num, str(e)) ) e.message = message raise e operations.append(operation) grammar_configs_count += 1 elif line_specifies_path(line): try: line, operation = process_line_containing_path( line, opts.orig_dir, opts.dest_dir, opts.symlink, opts.absolute ) except PathException as e: # Prepend the line number to the error message message = ( 'ERROR: Configuration file "{0}" line {1}: {2}' .format(opts.config.name, line_num, str(e)) ) e.message = message raise e operations.append(operation) result_config_lines.append(line) ########################### # Output joshua.config file # Create the Joshua configuration file for the package path = os.path.join(opts.dest_dir, OUTPUT_CONFIG_FILE_NAME) text = '\n'.join(result_config_lines) + '\n' operations.append( (write_string_to_file, (path, text), 'Writing the updated joshua.config to %s' % path ) ) ####################### # Bundle runner scripts # Write the scripts that run Joshua using the configuration and # resource in the bundle, and make their mode world-readable, and # world-executable. for file_name, file_text in [[BUNDLE_RUNNER_FILE_NAME, BUNDLE_RUNNER_TEXT], [SERVER_RUNNER_FILE_NAME, SERVER_RUNNER_TEXT], ]: path = os.path.join(opts.dest_dir, file_name) operations.append( (write_string_to_file, (path, file_text), 'Writing the bundle runner file "%s"' % path) ) mode = (stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH) operations.append( (os.chmod, (path, mode), 'Making the bundle runner file executable') ) ####################### # Write the README file path = os.path.join(opts.dest_dir, 'README') operations.append( (write_string_to_file, (path, README_TEMPLATE), 'Writing the README to "%s"' % path ) ) return operations
5,350,825
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.): """ FROM KERAS Pads each sequence to the same length: the length of the longest sequence. If maxlen is provided, any sequence longer than maxlen is truncated to maxlen. Truncation happens off either the beginning (default) or the end of the sequence. Supports post-padding and pre-padding (default). # Arguments sequences: list of lists where each element is a sequence maxlen: int, maximum length dtype: type to cast the resulting sequence. padding: 'pre' or 'post', pad either before or after each sequence. truncating: 'pre' or 'post', remove values from sequences larger than maxlen either in the beginning or in the end of the sequence value: float, value to pad the sequences to the desired value. # Returns x: numpy array with dimensions (number_of_sequences, maxlen) """ lengths = [len(s) for s in sequences] nb_samples = len(sequences) if maxlen is None: maxlen = np.max(lengths) # take the sample shape from the first non empty sequence # checking for consistency in the main loop below. sample_shape = tuple() for s in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype) for idx, s in enumerate(sequences): if len(s) == 0: continue # empty list was found if truncating == 'pre': trunc = s[-maxlen:] elif truncating == 'post': trunc = s[:maxlen] else: raise ValueError('Truncating type "%s" not understood' % truncating) # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' % (trunc.shape[1:], idx, sample_shape)) if padding == 'post': x[idx, :len(trunc)] = trunc elif padding == 'pre': x[idx, -len(trunc):] = trunc else: raise ValueError('Padding type "%s" not understood' % padding) return x
5,350,826
def basic_pyxll_function_22(x, y, z):
    """if z return x, else return y"""
    if z:
        # we're returning an integer, but the signature
        # says we're returning a float.
        # PyXLL will convert the integer to a float for us.
        return x
    return y
5,350,827
def job_hotelling(prob_label, tr, te, r, ni, n):
    """Hotelling T-squared test"""
    with util.ContextTimer() as t:
        htest = tst.HotellingT2Test(alpha=alpha)
        test_result = htest.perform_test(te)
    return {'test_method': htest, 'test_result': test_result, 'time_secs': t.secs}
5,350,828
def get(id: str):
    """Get a notebook."""
    try:
        ep = f"{notebook_ep}/{id}"
        r = request("GET", ep, True)
        check_response(r)

        d = r.json()
        res = d.get("result")
        if res is None:
            raise Exception("Data not received.")

        # Notebook data
        _id = res["id"]
        name = res["name"]
        tag_colors = res.get("tag_colors")
        created = res["created"].replace("T", " ")
        last_mod = res["last_modified"].replace("T", " ")

        if tag_colors is not None:
            tag_colors = [f"{i}={v}" for i, v in tag_colors.items()]
            tag_colors = ", ".join(tag_colors)

        echo("ID:" + (" " * 12) + _id)
        echo(f"Name:" + (" " * 10) + name)
        if tag_colors is not None:
            echo(f"Tag colors:" + (" " * 4) + tag_colors)
        echo("Created:" + (" " * 7) + created)
        echo(f"Last modified: {last_mod}")
    except Exception as e:
        sys.exit(f"Error: {e}")
5,350,829
def return_circle_aperature(field, mask_r):
    """Filter the circle aperture of a light field.

    Parameters
    ----------
    field : Field
        Input square field.
    mask_r : float, from 0 to 1
        Radius of a circle mask.

    Returns
    ----------
    X : array-like
        Filtered meshgrid X.
    Y : array-like
        Filtered meshgrid Y.
    """
    length = field.shape[0]
    norm_length = np.linspace(-1, 1, length)
    X, Y = np.meshgrid(norm_length, norm_length)
    norm_radius = np.sqrt(X**2 + Y**2)
    X[norm_radius > mask_r] = np.nan
    Y[norm_radius > mask_r] = np.nan

    return X, Y, norm_radius
5,350,830
def train(flags=None): """Train a seq2seq model on human motion""" if flags is None: flags = create_flags() train_dir, summaries_dir = get_dirs(flags) train_set, test_set, data_mean, data_std = read_qpos_data(flags.seq_length_in, flags.seq_length_out, flags.data_dir, not flags.omit_one_hot) # Limit TF to take a fraction of the GPU memory gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1, allow_growth=True) device_count = {"GPU": 0} if flags.use_cpu else {"GPU": 1} with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, device_count=device_count)) as sess: # === Create the model === print("Creating %d layers of %d units." % (flags.num_layers, flags.size)) model = create_model(sess, flags=flags) model.train_writer.add_graph(sess.graph) print("Model created") # === This is the training loop === step_time, loss, val_loss = 0.0, 0.0, 0.0 current_step = 0 if flags.load <= 0 else flags.load + 1 previous_losses = [] data_keys = list(train_set.keys()) batches = math.ceil(len(data_keys) / flags.batch_size) # step_time, loss = 0, 0 for _ in range(flags.iterations): start_time = time.time() # shuffle data keys in each iteration random.shuffle(data_keys) # === Training step === total_frames = flags.seq_length_in + flags.seq_length_out sub_batches = 68 total_sub_batch_loss = 0 for sub_batch in range(int(sub_batches)): encoder_inputs, decoder_inputs, decoder_outputs = model.get_sub_batch(train_set, sub_batch, data_keys=data_keys) _, sub_batch_loss, loss_summary, lr_summary = model.step(sess, encoder_inputs, decoder_inputs, decoder_outputs[:, :, :66], False) model.train_writer.add_summary(loss_summary, current_step) model.train_writer.add_summary(lr_summary, current_step) total_sub_batch_loss += sub_batch_loss step_loss = total_sub_batch_loss / int(sub_batches) if current_step % 10 == 0: print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss)) step_time += (time.time() - start_time) / flags.test_every loss += step_loss / flags.test_every current_step += 1 # === steplearning rate decay === if current_step % flags.learning_rate_step == 0: sess.run(model.learning_rate_decay_op) # Once in a while, we save checkpoint, print statistics, and run evals i.e, validation. 
if current_step % flags.test_every == 0: # === Validation with randomly chosen seeds === forward_only = True test_data_keys = list(test_set.keys()) random.shuffle(test_data_keys) test_batch = 0 # === Testing step === test_sub_batches = 22 total_test_sub_batch_loss = 0 total_ms_loss = 0 for sub_batch in range(int(test_sub_batches)): encoder_inputs, decoder_inputs, decoder_outputs = model.get_sub_batch(test_set, test_data_keys, sub_batch) sub_batch_loss, ms_loss, loss_summary = model.step(sess, encoder_inputs, decoder_inputs, decoder_outputs[:, :, :66], forward_only) total_test_sub_batch_loss += sub_batch_loss total_ms_loss += ms_loss val_loss = total_test_sub_batch_loss / int(test_sub_batches) # Loss book-keeping avg_ms_loss = total_ms_loss / int(test_sub_batches) val_summary = tf.Summary(value=[tf.Summary.Value(tag='loss/loss', simple_value=val_loss)]) model.test_writer.add_summary(val_summary, current_step) # assuming 1 frame = 8.333ms print() print("{0: <16} |".format("milliseconds"), end="") for ms in [16.5, 33, 66.5, 83.3, 125, 150, 183, 208]: print(" {0:.2f} |".format(ms), end="") print() # Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms print("{0: <16} |".format('walking'), end="") # [1,3,7,9,13,24] => the no of timesteps in output/prediction sequnce (total is 25) # for training with parameter "--seq_length_out 25" for ms in [2, 4, 8, 10, 15, 18, 22, 25]: if flags.seq_length_out >= ms: print(" {0:.3f} |".format(avg_ms_loss[ms - 1]), end="") else: print(" n/a |", end="") print() print() print("============================\n" "Global step: %d\n" "Learning rate: %.4f\n" "Step-time (ms): %.4f\n" "Train loss avg: %.4f\n" "--------------------------\n" "Val loss: %.4f\n" "============================" % (model.global_step.eval(), model.learning_rate.eval(), step_time * 1000, loss, val_loss)) print() previous_losses.append(loss) # Save the model if current_step % flags.save_every == 0: print("Saving the model...") start_time = time.time() model.saver.save(sess, os.path.normpath(os.path.join(train_dir, 'checkpoint')), global_step=current_step) print("done in {0:.2f} ms".format((time.time() - start_time) * 1000)) # Reset global time and loss step_time, loss = 0, 0 sys.stdout.flush()
5,350,831
def user_get(): """ Get information from the database about an user, given his id. If there are field names received in the body, only those will be queried. If no field is provided, every field will be selected. The body should be a JSON object following the schema: { "user_id": id, "fields": ["field1", ...] } Returns: Response: - 200 in case of success and the user info in the body. - 400 if the body does not have all the necessary information or the field names are wrong. - 404 if the user is not found. """ body_schema = { "type": "object", "properties": { "user_id": {"type": "number"}, "fields": { "type": "array", "minItems": 1, "items": { "type": "string", } } }, "required": ["user_id"] } payload = request.get_json(silent=True) is_valid = validate_json(payload, body_schema) if not is_valid: return Response(status=400) user_id = payload["user_id"] if "fields" in payload: fields = payload["fields"] query = sql.SQL("SELECT {} FROM users WHERE user_id={};").format( sql.SQL(", ").join(map(sql.Identifier, fields)), sql.Literal(user_id) ) else: query = sql.SQL("SELECT * FROM users WHERE user_id={};").format( sql.Literal(payload["user_id"]) ) cursor = CONN.cursor(cursor_factory=RealDictCursor) try: cursor.execute(query) results = cursor.fetchall() except psycopg2.errors.UndefinedColumn: CONN.rollback() return Response(status=400) finally: cursor.close() CONN.commit() if len(results) == 0: return Response(status=404) return Response( status=200, response=json.dumps(results), mimetype="application/json" )
5,350,832
def get_data_file_args(args, language):
    """
    For an interface, return the language-specific set of data file arguments

    Args:
        args (dict): Dictionary of data file arguments for an interface
        language (str): Language of the testbench

    Returns:
        dict: Language-specific data file arguments
    """
    if language in args:
        return args[language]
    return args["generic"]
5,350,833
def mqtt_publish_temperature_val():
    """Publish system temperature value in centigrades to MQTT data topic."""
    message = round_temp(dev_system.temperature)
    if mqtt.connected:
        cfg_section = mqtt.GROUP_TOPICS
        cfg_option = 'system_data_temp_val'
        try:
            mqtt.publish(message, cfg_option, cfg_section)
            logger.debug(
                'Published temperature %s°C to MQTT topic %s',
                message, mqtt.topic_name(cfg_option, cfg_section))
        except Exception as errmsg:
            logger.error(
                'Publishing temperature %s°C to MQTT topic %s failed: %s',
                message, mqtt.topic_name(cfg_option, cfg_section), errmsg)
    else:
        logger.debug('System temperature value %s°C', message)
5,350,834
def branch_exists(branch):
    """Return True if the branch exists."""
    try:
        run_git("rev-parse --verify {}".format(branch), quiet=True)
        return True
    except ProcessExecutionError:
        return False
5,350,835
def spectrum_like_noise(signal: numpy.ndarray, *, sampling_rate=40000, keep_signal_amp_envelope=False, low_pass_cutoff=50, # Hz low_pass_order=6, seed: int = 42, window_length_sec: float = 20 / 1000, # 20 ms p_overlap: float = .5, long_term_avg: bool = True ) -> numpy.ndarray: """Create a noise with same spectrum as the input signal. randomises phase Parameters ---------- signal : array_like Input signal. sampling_rate : int Sampling frequency of the input signal. (Default value = 40000) keep_signal_amp_envelope : bool Apply the envelope of the original signal to the noise. (Default value = False) low_pass_cutoff : float low_pass_order : int seed : int long_term_avg : bool window_length_sec: int p_overlap: float Returns ------- ndarray Noise signal. """ assert window_length_sec > 0 assert 0 <= p_overlap <= 1 signal = zero_pad_to_power_2(signal) # Ensure welch works with any window size signal_length = signal.shape[-1] window_sum_squares = signal_length # scaling factor defined as sum of squared samples of window function sc = 2 / (sampling_rate * window_sum_squares) # Scaling coefficient 2 takes into account removal of energy at negative frequencies (we drop this side of PSD) if not long_term_avg: n_fft = next_pow_2(signal_length) spec = numpy.abs(fft.rfft(signal, n_fft)) psd = (spec ** 2) * sc else: n_per_seg = next_pow_2(int(sampling_rate * window_length_sec)) # next_pow_2 per seg == n_fft n_overlap = int(n_per_seg * p_overlap) f, psd = welch(signal, sampling_rate, nperseg=n_per_seg, noverlap=n_overlap, scaling='density', return_onesided=True, detrend=False, # window='boxcar', window='hanning', ) n_fft = n_per_seg psd /= (signal_length / n_per_seg ) # normalise? spec = numpy.sqrt((psd / sc)) noise = [] runs = signal_length // n_fft for i in range(runs + 1): numpy_seed(seed + i) noise.extend(numpy.real( fft.irfft( spec * numpy.exp(2 * numpy.pi * 1j * numpy.random.random(spec.shape[-1])), # Randomise phase. 0->360, 2 pi rads n_fft))) # Give each spectral component a random phase, PHI(f(k)) = random number, # uniformly distributed between 0 and 360 degrees (or equivalently, between 0 and 2Pi radians); noise = numpy.array(noise)[:signal_length] if keep_signal_amp_envelope: [bb, aa] = butter(low_pass_order, low_pass_cutoff / (sampling_rate / 2)) # Cutoff Hz, LP filter noise *= filtfilt(bb, # numerator aa, # denominator hilbert_envelope(signal) # envelope of speech signal in time domain ) return numpy.expand_dims(noise, 0)
5,350,836
def check_string_capitalised(string):
    """Check to see if a string is in all CAPITAL letters. Boolean."""
    return bool(re.match('^[A-Z_]+$', string))
5,350,837
def sample_zero_entries(edge_index, seed, num_nodes, sample_mult=1.0): """Obtain zero entries from a sparse matrix. Args: edge_index (tensor): (2, N), N is the number of edges. seed (int): to control randomness num_nodes (int): number of nodes in the graph sample_mult (float): the number of edges sampled is N * sample_mult. Returns: torch.tensor, (2, N) containing zero entries """ n_edges = edge_index.shape[1] np.random.seed(seed) # Number of edges in both directions must be even n_samples = int(np.ceil(sample_mult * n_edges / 2) * 2) adjacency = adj_from_edge_index(edge_index, num_nodes) zero_entries = np.zeros([2, n_samples], dtype=np.int32) nonzero_or_sampled = set(zip(*adjacency.nonzero())) i = 0 while True: t = tuple(np.random.randint(0, adjacency.shape[0], 2)) # Don't sample diagonal of the adjacency matrix if t[0] == t[1]: continue if t not in nonzero_or_sampled: # Add edge in both directions t_rev = (t[1], t[0]) zero_entries[:, i] = t zero_entries[:, i+1] = t_rev i += 2 if i == n_samples: break nonzero_or_sampled.add(t) nonzero_or_sampled.add(t_rev) return torch.tensor(zero_entries, dtype=torch.long)
5,350,838
def add_wsl_outputs(model, blob_in, dim, prefix=''): """Add RoI classification and bounding box regression output ops.""" if cfg.WSL.CONTEXT: fc8c, fc8d = add_wsl_context_outputs(model, blob_in, dim, prefix=prefix) else: # Box classification layer fc8c = model.FC( blob_in, prefix + 'fc8c', dim, model.num_classes - 1, weight_init=('XavierFill', {}), # weight_init=gauss_fill(0.01), bias_init=const_fill(0.0), ) fc8d = model.FC( blob_in, prefix + 'fc8d', dim, model.num_classes - 1, weight_init=('XavierFill', {}), # weight_init=gauss_fill(0.01), bias_init=const_fill(0.0), ) if cfg.WSL.CMIL and model.train: fc8c, fc8d = add_wsl_cmil(model, [fc8c, fc8d], dim, prefix=prefix) model.Softmax(fc8c, prefix + 'alpha_cls', axis=1) model.Transpose(fc8d, prefix + 'fc8d_t', axes=(1, 0)) model.Softmax(prefix + 'fc8d_t', prefix + 'alpha_det_t', axis=1) model.Transpose(prefix + 'alpha_det_t', prefix + 'alpha_det', axes=(1, 0)) model.net.Mul([prefix + 'alpha_cls', prefix + 'alpha_det'], prefix + 'rois_pred') if not model.train: # == if test # Add BackGround predictions model.net.Split( prefix + 'rois_pred', [prefix + 'rois_bg_pred', prefix + 'notuse'], split=[1, model.num_classes - 2], axis=1) model.net.Concat( [prefix + 'rois_bg_pred', prefix + 'rois_pred'], [prefix + 'cls_prob', prefix + 'cls_prob_concat_dims'], axis=1) if cfg.WSL.CONTEXT: blob_in = blob_in[0] dim = dim if cfg.WSL.CMIL: add_wsl_cmil_outputs(model, blob_in, dim, prefix=prefix) elif cfg.WSL.OICR : add_wsl_oicr_outputs(model, blob_in, dim, prefix=prefix) elif cfg.WSL.PCL: add_wsl_pcl_outputs(model, blob_in, dim, prefix=prefix)
5,350,839
def ped_file_parent_missing(fake_fs):
    """Return fake file system with PED file"""
    content = textwrap.dedent(
        """
        # comment
        FAM II-1\tI-1\t0\t1\t2
        FAM I-1 0\t0\t1\t1
        """
    ).strip()
    fake_fs.fs.create_file("/test.ped", create_missing_dirs=True, contents=content)
    return fake_fs
5,350,840
def symlink_sysmeta_to_usermeta(headers):
    """
    Helper function to translate from cluster-facing
    X-Object-Sysmeta-Symlink-* headers to client-facing X-Symlink-* headers.

    :param headers: request headers dict. Note that the headers dict
        will be updated directly.
    """
    for user_hdr, sysmeta_hdr in (
            (TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR),
            (TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR),
            (TGT_ETAG_SYMLINK_HDR, TGT_ETAG_SYSMETA_SYMLINK_HDR),
            (TGT_BYTES_SYMLINK_HDR, TGT_BYTES_SYSMETA_SYMLINK_HDR)):
        if sysmeta_hdr in headers:
            headers[user_hdr] = headers.pop(sysmeta_hdr)
5,350,841
def GettingAyah():
    """The code used to get an Ayah from the Quran every fixed time"""
    while True:
        ayah = random.randint(1, 6237)
        url = f'http://api.alquran.cloud/v1/ayah/{ayah}'
        res = requests.get(url)
        if len(res.json()['data']['text']) <= 280:
            return res.json()['data']['text']
5,350,842
def iterdecode(value):
    """
    Decode enumerable from string presentation as a tuple
    """
    if not value:
        return tuple()
    result = []
    accumulator = u''
    escaped = False
    for c in value:
        if not escaped:
            if c == CHAR_ESCAPE:
                escaped = True
                continue
            elif c == CHAR_SEPARATOR:
                result.append(accumulator)
                accumulator = u''
                continue
        else:
            escaped = False
        accumulator += c
    result.append(accumulator)
    return tuple(result)
5,350,843
def run_resolution_filter(image=None, image_path=None, height=600, width=1000):
    """
    Takes the correctly rotated YOLO output image. Initially this is done for
    driving licenses only. Returns 1 if the image height and width are greater
    than the desired height and width (600 x 1000 pixels by default),
    otherwise returns 10002.
    """
    result = False
    if image is not None:
        result = test_image_height_and_width(
            image, desired_width=width, desired_height=height
        )
    if image_path is not None and image is None:
        img = cv2.imread(image_path)
        result = test_image_height_and_width(
            img, desired_width=width, desired_height=height
        )
    if result:
        return 1
    else:
        return 10002
5,350,844
def text_to_emotion(text):
    """
    Infer the emotion from the text and return it.

    Parameters
    ----------
    text : string
        The text to analyze.

    Returns
    -------
    {'magnitude', 'score'}
    """
    client = language.LanguageServiceClient()
    document = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT,
        language="ja"
    )
    sentiment = client.analyze_sentiment(document=document).document_sentiment

    return {'magnitude': sentiment.magnitude, 'score': sentiment.score}
5,350,845
def process_actions(list_response_json, headers, url, force_reset): """ If a policy does not exist on a given cluster find the right values defined in qos_dict and apply them """ qos_dict = {} # This dictionary sets the tiers and min/max/burst settings qos_dict['tiers'] = {"bronze": [500, 5000, 10000], "silver": [2000, 20000, 50000], "gold": [5000, 100000, 150000]} # Check to see if there are no policies set force_reset_dict = {} if len(list_response_json['result']['qosPolicies']) == 0: print(f"No existing QoS Policies found, implementing full install") for qos_key, qos_val in qos_dict['tiers'].items(): pol_name = qos_key min_iops = qos_val[0] max_iops = qos_val[1] burst_iops = qos_val[2] payload = build_payload(pol_name, min_iops, max_iops, burst_iops) connect_cluster(headers, url, payload) # If there are policies ignore them if they match names, remove that # name from the dict and move on else: for policy in list_response_json['result']['qosPolicies']: pol_name = policy['name'] if pol_name in qos_dict['tiers'].keys(): pol_id = policy['qosPolicyID'] min_iops = qos_dict['tiers'][pol_name][0] max_iops = qos_dict['tiers'][pol_name][1] burst_iops = qos_dict['tiers'][pol_name][2] pol_min = policy['qos']['minIOPS'] pol_max = policy['qos']['maxIOPS'] pol_burst = policy['qos']['burstIOPS'] if ((min_iops != pol_min or max_iops != pol_max or burst_iops != pol_burst) and force_reset is True): print(f"Policy mismatch detected on {pol_name}... resetting " f"as reset flag is set to True") print(qos_dict['tiers'][pol_name]) modify_qos_policy(headers, url, pol_id, min_iops, max_iops, burst_iops) elif ((min_iops != pol_min or max_iops != pol_max or burst_iops != pol_burst) and force_reset is False): print(f"Policy mismatch detected on {pol_name}... Leaving " f"as reset flag is set to false") else: print(f"QoS Policy {pol_name} found, policy is not in " f"configuration dictionary. Ignoring") pass if policy['name'] in qos_dict['tiers'].keys(): qos_dict['tiers'].pop(pol_name) return qos_dict
5,350,846
def list_bundles(maxResults=None, nextToken=None): """ List all available bundles. See also: AWS API Documentation Exceptions :example: response = client.list_bundles( maxResults=123, nextToken='string' ) :type maxResults: integer :param maxResults: Maximum number of records to list in a single response. :type nextToken: string :param nextToken: Pagination token. Set to null to start listing bundles from start. If non-null pagination token is returned in a result, then pass its value in here in another request to list more bundles. :rtype: dict ReturnsResponse Syntax { 'bundleList': [ { 'bundleId': 'string', 'title': 'string', 'version': 'string', 'description': 'string', 'iconUrl': 'string', 'availablePlatforms': [ 'OSX'|'WINDOWS'|'LINUX'|'OBJC'|'SWIFT'|'ANDROID'|'JAVASCRIPT', ] }, ], 'nextToken': 'string' } Response Structure (dict) -- Result structure contains a list of all available bundles with details. bundleList (list) -- A list of bundles. (dict) -- The details of the bundle. bundleId (string) -- Unique bundle identifier. title (string) -- Title of the download bundle. version (string) -- Version of the download bundle. description (string) -- Description of the download bundle. iconUrl (string) -- Icon for the download bundle. availablePlatforms (list) -- Developer desktop or mobile app or website platforms. (string) -- Developer desktop or target mobile app or website platform. nextToken (string) -- Pagination token. If non-null pagination token is returned in a result, then pass its value in another request to fetch more entries. Exceptions Mobile.Client.exceptions.InternalFailureException Mobile.Client.exceptions.ServiceUnavailableException Mobile.Client.exceptions.UnauthorizedException Mobile.Client.exceptions.TooManyRequestsException Mobile.Client.exceptions.BadRequestException :return: { 'bundleList': [ { 'bundleId': 'string', 'title': 'string', 'version': 'string', 'description': 'string', 'iconUrl': 'string', 'availablePlatforms': [ 'OSX'|'WINDOWS'|'LINUX'|'OBJC'|'SWIFT'|'ANDROID'|'JAVASCRIPT', ] }, ], 'nextToken': 'string' } :returns: Mobile.Client.exceptions.InternalFailureException Mobile.Client.exceptions.ServiceUnavailableException Mobile.Client.exceptions.UnauthorizedException Mobile.Client.exceptions.TooManyRequestsException Mobile.Client.exceptions.BadRequestException """ pass
5,350,847
def calculate_statistics(stats_table, vocab, period):
    """Calculate statistics for the provided vocabulary and add them to stats table.

    Parameters
    ----------
    stats_table: dict, required
        The dictionary containing statistics.
    vocab: iterable of str, required
        The vocabulary for which to compute statistics.
    period: pandas.Interval, required
        The period of the vocabulary.
    """
    if 'period' not in stats_table:
        stats_table['period'] = []
    if 'num_tokens' not in stats_table:
        stats_table['num_tokens'] = []

    stats_table['period'].append(format_period(period))
    stats_table['num_tokens'].append(len(vocab))
5,350,848
def validate_ac_power(observation, values):
    """
    Run a number of validation checks on a daily timeseries of AC power.

    Parameters
    ----------
    observation : solarforecastarbiter.datamodel.Observation
        Observation object that the data is associated with
    values : pandas.Series
        Series of observation values

    Returns
    -------
    timestamp_flag : pandas.Series
        Bitmask from :py:func:`.validator.check_timestamp_spacing`
    night_flag : pandas.Series
        Bitmask from :py:func:`.validator.check_day_night` or
        :py:func:`.validator.check_day_night_interval`
    limit_flag : pandas.Series
        Bitmask from :py:func:`.validator.check_ac_power_limits`
    """
    solar_position, dni_extra, timestamp_flag, night_flag = _solpos_dni_extra(
        observation, values)
    day_night = \
        ~quality_mapping.convert_mask_into_dataframe(night_flag)['NIGHTTIME']
    limit_flag = validator.check_ac_power_limits(
        values, day_night,
        observation.site.modeling_parameters.ac_capacity, _return_mask=True)
    return timestamp_flag, night_flag, limit_flag
5,350,849
def partition(predicate, iterable):
    """Use `predicate` to partition entries into falsy and truthy ones.

    Recipe taken from the official documentation.
    https://docs.python.org/3/library/itertools.html#itertools-recipes
    """
    t1, t2 = itertools.tee(iterable)
    return (
        six.moves.filterfalse(predicate, t1),
        six.moves.filter(predicate, t2),
    )
5,350,850
def _download(url: str) -> bytes:
    """Download something from osu!web at `url`, returning the file contents."""
    with _login() as sess:
        resp = sess.get(f"{url}/download", headers={"Referer": url})
    if not resp.ok:
        raise ReplyWith("Sorry, a download failed.")
    return resp.content
5,350,851
def get_node(obj, path):
    """Retrieve a deep object based on a path. Return either a Wrapped
    instance if the deep object is not a node, or another type of object."""
    subobj = obj
    indices = []
    for item in path:
        try:
            subobj = subobj[item]
        except Exception as e:
            indices.append(item)
    subobj, indices = _select(subobj, indices)
    if isinstance(subobj, dict) or (isinstance(subobj, list) and
                                    subobj and isinstance(subobj[0], dict)):
        return Wrapped(obj, path)
    else:
        assert not indices, "This path does not exist."
        return subobj
5,350,852
def karto(data):
    """Karto node subscriber method.

    Sends the received karto node information to the treatment method.

    :param data: OccupancyGrid Data, containing map information and metadata
        like height, width or resolution of the map.
    :type data: OccupancyGrid struct.
    """
    global countkarto
    countkarto = countkarto + 1
    print("Karto->", countkarto)
    treatment(data, 'karto', countkarto)
5,350,853
def without_bond_orders(gra):
    """ resonance graph with maximum spin (i.e. no pi bonds)
    """
    bnd_keys = list(bond_keys(gra))
    # don't set dummy bonds to one!
    bnd_ord_dct = bond_orders(gra)
    bnd_vals = [1 if v != 0 else 0
                for v in map(bnd_ord_dct.__getitem__, bnd_keys)]
    bnd_ord_dct = dict(zip(bnd_keys, bnd_vals))
    return set_bond_orders(gra, bnd_ord_dct)
5,350,854
def check_struc(d1, d2, errors=[], level='wf'):
    """Recursively check the structure of dictionary 2 against that of dictionary 1

    Arguments
    ---------
    d1 : dict
        Dictionary with desired structure
    d2 : dict
        Dictionary with structure to check
    errors : list of str, optional
        Missing values in d2. Initial value is [].
    level : str, optional
        Level of search. Initial value is 'wf' (wind farm) for top-level dictionary.

    Returns
    -------
    errors : list of str
        Missing values in d2.
    """
    for k1, v1 in d1.items():        # loop through keys and values in first dict
        if k1 not in d2.keys():      # if key doesn't exist in d2
            errors.append('{} not in dictionary'.format('.'.join([level, k1])))
        elif isinstance(v1, dict):   # otherwise, if item is a dict, recurse
            errors = check_struc(v1, d2[k1],
                                 errors=errors,                # pass in accumulated errors
                                 level='.'.join([level, k1]))  # change level
    return errors
5,350,855
def indexoflines(LFtop):
    """ Determining selected line index of Gromacs compatible topology files """
    file1 = open(LFtop, "r")
    readline = file1.readlines()
    lineindex = ["x", "x", "x"]
    n = 0
    for line in readline:
        linelist = line.split()
        if "atomtypes" in linelist:
            lineindex[0] = n
            n += 1
        elif "moleculetype" in linelist:
            lineindex[1] = n
            n += 1
        elif "system" in linelist:
            lineindex[2] = n
            n += 1
        else:
            n += 1
    file1.close()

    Idx = 0
    while Idx < len(lineindex):
        if not str(lineindex[Idx]).isnumeric() == True:
            lineindex[Idx] = n + 1
            Idx += 1
        else:
            Idx += 1

    return {'atomtypes': lineindex[0], 'moleculetype': lineindex[1], 'system': lineindex[2]}
5,350,856
def checkout_program(connection, name, destdir=None):
    """Download program sources"""
    adt_program = sap.adt.Program(connection, name)
    adt_program.fetch()

    download_abap_source(name, adt_program, '.prog', destdir=destdir)

    progdir, tpool = build_program_abap_attributes(adt_program)
    dump_attributes_to_file(name, (progdir, tpool), '.prog', 'LCL_OBJECT_PROG', destdir=destdir)
5,350,857
def print_details(field):
    """Print the definition of one field, in readable text"""
    print(u"Name: {}".format(apiname(field)))
    print(u"Label: {}".format(field["text"]))
    print(u"ID: {}".format(field["id"]))
    print(u"Type: {}".format(field["input_type"]))
    if "tooltip" in field:
        if field["tooltip"]:
            print(u"Tooltip: {}".format(field["tooltip"]))
    if "placeholder" in field:
        if field["placeholder"]:
            print(u"Placeholder: {}".format(field["placeholder"]))
    if "required" in field:
        print(u"Required: {}".format(field["required"]))
    if "values" in field:
        if field["values"]:
            print("Values:")
            v = sorted(field["values"], key=lambda x: x["value"])
            for value in v:
                default_flag = " "
                if value["default"]:
                    default_flag = "*"
                if not value["enabled"]:
                    default_flag = "x"
                label = value["label"]
                print(u'{} {}={}'.format(default_flag, value["value"], label))
5,350,858
def onTrainingButtonPress(sid, data):
    """
    onTrainingButtonPress({'identifier': "-", 'id': "-"})

    The html interface calls this function when the user clicks any of the
    buttons that the html interface was sent with the function
    getTrainingButtons(). The identifier is the identifier that was sent with
    the button and the id is the id of the user gotten from the psiturk id
    (the same id used in join as shown below)
    """
    uid = connections.get(sid, None)
    game = games.get(uid, None)
    if game is not None:
        game.event(uid, event_type='button', event_data=data['identifier'])
5,350,859
def create_mock_github(user='octo-cat', private=False):
    """Factory for mock GitHub objects.

    Example: ::

        >>> github = create_mock_github(user='octocat')
        >>> github.branches(user='octocat', repo='hello-world')
        >>> [{u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230',
        ...   u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'},
        ...  u'name': u'dev'},
        ... {u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c',
        ...   u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'},
        ...  u'name': u'master'},
        ... {u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6',
        ...   u'url': u'https://api.github.com/repos/octocat/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'},
        ...  u'name': u'no-bundle'}]

    :param str user: Github username.
    :param bool private: Whether repo is private.
    :return: An autospecced GitHub Mock object
    """
    github_mock = mock.create_autospec(GitHub)
    github_mock.repo.return_value = github3.repos.Repository.from_json({
        u'archive_url': u'https://api.github.com/repos/{user}/mock-repo/{{archive_format}}{{/ref}}'.format(user=user),
        u'assignees_url': u'https://api.github.com/repos/{user}/mock-repo/assignees{{/user}}'.format(user=user),
        u'blobs_url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs{{/sha}}'.format(user=user),
        u'branches_url': u'https://api.github.com/repos/{user}/mock-repo/branches{{/branch}}'.format(user=user),
        u'clone_url': u'https://github.com/{user}/mock-repo.git'.format(user=user),
        u'collaborators_url': u'https://api.github.com/repos/{user}/mock-repo/collaborators{{/collaborator}}'.format(user=user),
        u'comments_url': u'https://api.github.com/repos/{user}/mock-repo/comments{{/number}}'.format(user=user),
        u'commits_url': u'https://api.github.com/repos/{user}/mock-repo/commits{{/sha}}'.format(user=user),
        u'compare_url': u'https://api.github.com/repos/{user}/mock-repo/compare/{{base}}...{{head}}',
        u'contents_url': u'https://api.github.com/repos/{user}/mock-repo/contents/{{+path}}'.format(user=user),
        u'contributors_url': u'https://api.github.com/repos/{user}/mock-repo/contributors'.format(user=user),
        u'created_at': u'2013-06-30T18:29:18Z',
        u'default_branch': u'dev',
        u'description': u'Simple, Pythonic, text processing--Sentiment analysis, part-of-speech tagging, noun phrase extraction, translation, and more.',
        u'downloads_url': u'https://api.github.com/repos/{user}/mock-repo/downloads'.format(user=user),
        u'events_url': u'https://api.github.com/repos/{user}/mock-repo/events'.format(user=user),
        u'fork': False,
        u'forks': 89,
        u'forks_count': 89,
        u'forks_url': u'https://api.github.com/repos/{user}/mock-repo/forks',
        u'full_name': u'{user}/mock-repo',
        u'git_commits_url': u'https://api.github.com/repos/{user}/mock-repo/git/commits{{/sha}}'.format(user=user),
        u'git_refs_url': u'https://api.github.com/repos/{user}/mock-repo/git/refs{{/sha}}'.format(user=user),
        u'git_tags_url': u'https://api.github.com/repos/{user}/mock-repo/git/tags{{/sha}}'.format(user=user),
        u'git_url': u'git://github.com/{user}/mock-repo.git'.format(user=user),
        u'has_downloads': True,
        u'has_issues': True,
        u'has_wiki': True,
        u'homepage': u'https://mock-repo.readthedocs.org/',
        u'hooks_url': u'https://api.github.com/repos/{user}/mock-repo/hooks'.format(user=user),
        u'html_url': u'https://github.com/{user}/mock-repo'.format(user=user),
        u'id': 11075275,
        u'issue_comment_url': u'https://api.github.com/repos/{user}/mock-repo/issues/comments/{{number}}'.format(user=user),
        u'issue_events_url': u'https://api.github.com/repos/{user}/mock-repo/issues/events{{/number}}'.format(user=user),
        u'issues_url': u'https://api.github.com/repos/{user}/mock-repo/issues{{/number}}'.format(user=user),
        u'keys_url': u'https://api.github.com/repos/{user}/mock-repo/keys{{/key_id}}'.format(user=user),
        u'labels_url': u'https://api.github.com/repos/{user}/mock-repo/labels{{/name}}'.format(user=user),
        u'language': u'Python',
        u'languages_url': u'https://api.github.com/repos/{user}/mock-repo/languages'.format(user=user),
        u'master_branch': u'dev',
        u'merges_url': u'https://api.github.com/repos/{user}/mock-repo/merges'.format(user=user),
        u'milestones_url': u'https://api.github.com/repos/{user}/mock-repo/milestones{{/number}}'.format(user=user),
        u'mirror_url': None,
        u'name': u'mock-repo',
        u'network_count': 89,
        u'notifications_url': u'https://api.github.com/repos/{user}/mock-repo/notifications{{?since,all,participating}}'.format(user=user),
        u'open_issues': 2,
        u'open_issues_count': 2,
        u'owner': {
            u'avatar_url': u'https://gravatar.com/avatar/c74f9cfd7776305a82ede0b765d65402?d=https%3A%2F%2Fidenticons.github.com%2F3959fe3bcd263a12c28ae86a66ec75ef.png&r=x',
            u'events_url': u'https://api.github.com/users/{user}/events{{/privacy}}'.format(user=user),
            u'followers_url': u'https://api.github.com/users/{user}/followers'.format(user=user),
            u'following_url': u'https://api.github.com/users/{user}/following{{/other_user}}'.format(user=user),
            u'gists_url': u'https://api.github.com/users/{user}/gists{{/gist_id}}'.format(user=user),
            u'gravatar_id': u'c74f9cfd7776305a82ede0b765d65402',
            u'html_url': u'https://github.com/{user}'.format(user=user),
            u'id': 2379650,
            u'login': user,
            u'organizations_url': u'https://api.github.com/users/{user}/orgs'.format(user=user),
            u'received_events_url': u'https://api.github.com/users/{user}/received_events',
            u'repos_url': u'https://api.github.com/users/{user}/repos'.format(user=user),
            u'site_admin': False,
            u'starred_url': u'https://api.github.com/users/{user}/starred{{/owner}}{{/repo}}',
            u'subscriptions_url': u'https://api.github.com/users/{user}/subscriptions'.format(user=user),
            u'type': u'User',
            u'url': u'https://api.github.com/users/{user}'.format(user=user),
        },
        u'private': private,
        u'pulls_url': u'https://api.github.com/repos/{user}/mock-repo/pulls{{/number}}'.format(user=user),
        u'pushed_at': u'2013-12-30T16:05:54Z',
        u'releases_url': u'https://api.github.com/repos/{user}/mock-repo/releases{{/id}}'.format(user=user),
        u'size': 8717,
        u'ssh_url': u'git@github.com:{user}/mock-repo.git'.format(user=user),
        u'stargazers_count': 1469,
        u'stargazers_url': u'https://api.github.com/repos/{user}/mock-repo/stargazers'.format(user=user),
        u'statuses_url': u'https://api.github.com/repos/{user}/mock-repo/statuses/{{sha}}'.format(user=user),
        u'subscribers_count': 86,
        u'subscribers_url': u'https://api.github.com/repos/{user}/mock-repo/subscribers'.format(user=user),
        u'subscription_url': u'https://api.github.com/repos/{user}/mock-repo/subscription'.format(user=user),
        u'svn_url': u'https://github.com/{user}/mock-repo'.format(user=user),
        u'tags_url': u'https://api.github.com/repos/{user}/mock-repo/tags'.format(user=user),
        u'teams_url': u'https://api.github.com/repos/{user}/mock-repo/teams'.format(user=user),
        u'trees_url': u'https://api.github.com/repos/{user}/mock-repo/git/trees{{/sha}}'.format(user=user),
        u'updated_at': u'2014-01-12T21:23:50Z',
        u'url': u'https://api.github.com/repos/{user}/mock-repo'.format(user=user),
        u'watchers': 1469,
        u'watchers_count': 1469,
        # NOTE: permissions are only available if authorized on the repo
        'permissions': {
            'push': True
        },
    })
    github_mock.branches.return_value = [
        Branch.from_json({u'commit': {u'sha': u'e22d92d5d90bb8f9695e9a5e2e2311a5c1997230',
                                      u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/e22d92d5d90bb8f9695e9a5e2e2311a5c1997230'.format(user=user)},
                          u'name': u'dev'}),
        Branch.from_json({u'commit': {u'sha': u'444a74d0d90a4aea744dacb31a14f87b5c30759c',
                                      u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/444a74d0d90a4aea744dacb31a14f87b5c30759c'.format(user=user)},
                          u'name': u'master'}),
        Branch.from_json({u'commit': {u'sha': u'c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6',
                                      u'url': u'https://api.github.com/repos/{user}/mock-repo/commits/c6eaaf6708561c3d4439c0c8dd99c2e33525b1e6'.format(user=user)},
                          u'name': u'no-bundle'}),
    ]
    # http://developer.github.com/v3/repos/contents/
    github_mock.contents.return_value = {
        'octokit.rb': github3.repos.contents.Contents.from_json({
            "type": "file",
            "size": 625,
            "name": u"\xf0octokit.rb",
            "path": u"\xf0octokit.rb",
            "sha": "fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b",
            "url": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit.rb".format(user=user),
            "git_url": "https://api.github.com/repos/{user}/octokit/git/blobs/fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b".format(user=user),
            "html_url": "https://github.com/{user}/octokit/blob/master/lib/octokit.rb",
            "_links": {
                "self": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit.rb".format(user=user),
                "git": "https://api.github.com/repos/{user}/octokit/git/blobs/fff6fe3a23bf1c8ea0692b4a883af99bee26fd3b".format(user=user),
                "html": "https://github.com/{user}/octokit/blob/master/lib/octokit.rb"
            },
        }),
        'octokit': github3.repos.contents.Contents.from_json({
            "type": "dir",
            "size": 0,
            "name": u"\xf0octokit",
            "path": u"\xf0octokit",
            "sha": "a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d",
            "url": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit".format(user=user),
            "git_url": "https://api.github.com/repos/{user}/octokit/git/trees/a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d",
            "html_url": "https://github.com/{user}/octokit/tree/master/lib/octokit".format(user=user),
            "_links": {
                "self": "https://api.github.com/repos/{user}/octokit/contents/lib/octokit".format(user=user),
                "git": "https://api.github.com/repos/{user}/octokit/git/trees/a84d88e7554fc1fa21bcbc4efae3c782a70d2b9d".format(user=user),
                "html": "https://github.com/{user}/octokit/tree/master/lib/octokit".format(user=user)
            },
        }),
    }
    github_mock.tree.return_value = github3.git.Tree.from_json({
        'url': u'https://api.github.com/repos/{user}/mock-repo/git/trees/dev'.format(user=user),
        'sha': 'dev',
        'tree': [
            {u'mode': u'100644',
             u'path': u'coveragerc',
             u'sha': u'92029ff5ce192425d346b598d7e7dd25f5f05185',
             u'size': 245,
             u'type': u'blob',
             u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/92029ff5ce192425d346b598d7e7dd25f5f05185'.format(user=user)},
            {u'mode': u'100644',
             u'path': u'.gitignore',
             u'sha': u'972ac8aeb0e652642b042064c835f27419e197b4',
             u'size': 520,
             u'type': u'blob',
             u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/972ac8aeb0e652642b042064c835f27419e197b4'.format(user=user)},
            {u'mode': u'100644',
             u'path': u'.travis.yml',
             u'sha': u'86e1fef2834cc2682e753f3ed26ab3c2e100478c',
             u'size': 501,
             u'type': u'blob',
             u'url': u'https://api.github.com/repos/{user}/mock-repo/git/blobs/86e1fef2834cc2682e753f3ed26ab3c2e100478c'.format(user=user)},
        ]
    })
    github_mock.commits.return_value = [
        {
            'sha': '12345',
            'name': 'authname',
            'email': 'authmail',
            'date': 'yesterday',
        }
    ]
    return github_mock
5,350,860
def get_args_string(args: argparse.Namespace) -> str:
    """
    Creates a string summarising the argparse arguments.

    :param args: parser.parse_args()
    :return: String of the arguments of the argparse namespace.
    """
    string = ''
    if hasattr(args, 'experiment_name'):
        string += f'{args.experiment_name} ({datetime.now()})\n'
    max_length = max([len(k) for k, _ in vars(args).items()])
    new_dict = OrderedDict((k, v) for k, v in sorted(
        vars(args).items(), key=lambda x: x[0]
    ))
    for key, value in new_dict.items():
        string += ' ' * (max_length - len(key)) + key + ': ' + str(value) + '\n'
    return string
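# Usage sketch (not from the original source): build a small Namespace and
# print the summary. Assumes argparse, datetime and OrderedDict are imported
# at module level, as the function above already relies on.
example_args = argparse.Namespace(experiment_name='demo-run', batch_size=32, lr=0.001)
print(get_args_string(example_args))
# Prints the experiment name with a timestamp, then one right-aligned
# "key: value" line per argument, sorted alphabetically.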
5,350,861
def top_9(limit=21):
    """Return the given number of books (default 21).

    The result is a list whose elements have the form
    (book_id, authors, title, original_publication_year, average_rating, image_url).
    """
    cur.execute(
        """SELECT book_id, authors, title, original_publication_year, average_rating, image_url
           FROM books
           ORDER BY average_rating DESC
           LIMIT %s
        """, [limit])
    najboljsi = cur.fetchall()
    # Return the tuples as described in the function docstring:
    return najboljsi
5,350,862
async def startup():
    """
    Startup event. Makes a temp folder to store the uploaded files.
    """
    p_information("Starting app...")
    if not os.path.exists(project_root / "static" / "temp"):
        os.mkdir(project_root / "static" / "temp")
5,350,863
def spm_dot_torch(X, x, dims_to_omit=None): """ Dot product of a multidimensional array with `x` -- Pytorch version, using Tensor instances @TODO: Instead of a separate function, this should be integrated with spm_dot so that it can either take torch.Tensors or nd.arrays The dimensions in `dims_to_omit` will not be summed across during the dot product Parameters ---------- 'X' [torch.Tensor] `x` [1D torch.Tensor or numpy object array containing 1D torch.Tensors] The array(s) to dot X with `dims_to_omit` [list :: int] (optional) Which dimensions to omit from summing across """ if x.dtype == object: dims = (np.arange(0, len(x)) + X.ndim - len(x)).astype(int) else: if x.shape[0] != X.shape[1]: """ Case when the first dimension of `x` is likely the same as the first dimension of `A` e.g. inverting the generative model using observations. Equivalent to something like self.values[np.where(x),:] when `x` is a discrete 'one-hot' observation vector """ dims = np.array([0], dtype=int) else: """ Case when `x` leading dimension matches the lagging dimension of `values` E.g. a more 'classical' dot product of a likelihood with hidden states """ dims = np.array([1], dtype=int) x_new = np.empty(1, dtype=object) x_new[0] = x.squeeze() x = x_new if dims_to_omit is not None: if not isinstance(dims_to_omit, list): raise ValueError("dims_to_omit must be a `list`") dims = np.delete(dims, dims_to_omit) if len(x) == 1: x = np.empty([0], dtype=object) else: x = np.delete(x, dims_to_omit) Y = X for d in range(len(x)): s = np.ones(Y.ndim, dtype=int) s[dims[d]] = max(x[d].shape) Y = Y * x[d].view(tuple(s)) Y = Y.sum(dim=int(dims[d]), keepdim=True) Y = Y.squeeze() # perform check to see if `y` is a number if Y.numel() <= 1: Y = np.asscalar(Y) Y = torch.Tensor([Y]) return Y
5,350,864
def inverseTranslateTaps(lowerTaps, pos):
    """Method to translate tap integer in range [-lowerTaps, raiseTaps]
    to range [0, lowerTaps + raiseTaps]
    """
    # Hmmm... is it this simple?
    posOut = pos + lowerTaps
    return posOut
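# Worked example (illustrative only): with 16 lower taps, tap position -4
# maps onto the zero-based index 12, and the neutral position 0 maps to 16.
assert inverseTranslateTaps(lowerTaps=16, pos=-4) == 12
assert inverseTranslateTaps(lowerTaps=16, pos=0) == 16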
5,350,865
def GMLstring2points(pointstring):
    """Convert list of points in string to a list of points. Works for 3D points."""
    listPoints = []
    #-- List of coordinates
    coords = pointstring.split()
    #-- Store the coordinate tuple
    assert(len(coords) % 3 == 0)
    for i in range(0, len(coords), 3):
        listPoints.append([float(coords[i]), float(coords[i+1]), float(coords[i+2])])
    return listPoints
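# Usage sketch (not from the original source): a GML posList-style string of
# whitespace-separated x y z triplets becomes a list of [x, y, z] floats.
points = GMLstring2points("0 0 0 10.5 2 3")
# points == [[0.0, 0.0, 0.0], [10.5, 2.0, 3.0]]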
5,350,866
def run(command, conf=None, tutorial=False, less_data=False, pdb=False, **args):
    """Main entry point for a direct call from Python

    Example usage:

    >>> from yam import run
    >>> run(conf='conf.json')

    :param command: if ``'create'`` the example configuration is created,
        optionally the tutorial data files are downloaded.
        For all other commands this function loads the configuration
        and constructs the arguments which are passed to `run2()`.

    All args correspond to the respective command line and configuration
    options. See the example configuration file for help and possible
    arguments. Options in args can overwrite the configuration from the file.
    E.g. ``run(conf='conf.json', bla='bla')`` will set the bla configuration
    value to ``'bla'``.
    """
    if pdb:
        import traceback, pdb

        def info(type, value, tb):
            traceback.print_exception(type, value, tb)
            print()
            pdb.pm()

        sys.excepthook = info
    if conf in ('None', 'none', 'null', ''):
        conf = None
    # Copy example files if create_config or tutorial
    if command == 'create':
        if conf is None:
            conf = 'conf.json'
        create_config(conf, tutorial=tutorial, less_data=less_data)
        return
    # Parse config file
    if conf:
        try:
            with open(conf) as f:
                conf = json.load(f, cls=ConfigJSONDecoder)
        except ValueError as ex:
            msg = 'Error while parsing the configuration: %s' % ex
            raise ConfigError(msg)
        except IOError as ex:
            raise ConfigError(ex)
        # Populate args with conf, but prefer args
        conf.update(args)
        args = conf
    run2(command, **args)
5,350,867
def set_(key, value, service=None, profile=None):  # pylint: disable=W0613
    """
    Set a key/value pair in the etcd service
    """
    client = _get_conn(profile)
    client.set(key, value)
    return get(key, service, profile)
5,350,868
def compute_cw_score_normalized(p, q, edgedict, ndict, params=None):
    """
    Computes the common weighted normalized score between p and q

    @param p        -> A node of the graph
    @param q        -> Another node in the graph
    @param edgedict -> A dictionary with key `(p, q)` and value `w`.
    @param ndict    -> A dictionary with key `p` and the value a set `{p1, p2, ...}`
    @param params   -> A dictionary that must contain the key "deg", mapping each
                       node to its degree (used for normalization)
    @return         -> A real value representing the score
    """
    if len(ndict[p]) > len(ndict[q]):
        p, q = q, p
    score = 0
    for elem in ndict[p]:
        if elem in ndict[q]:
            p_elem = edgedict[(p, elem)] if (p, elem) in edgedict else edgedict[(elem, p)]
            q_elem = edgedict[(q, elem)] if (q, elem) in edgedict else edgedict[(elem, q)]
            score += p_elem + q_elem
    degrees = params["deg"]
    return score / np.sqrt(degrees[p] * degrees[q])
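# Usage sketch (not from the original source): a toy graph in which nodes 'a'
# and 'b' share the common neighbour 'c'. The numbers are purely illustrative;
# the function only needs mutually consistent edge, neighbour and degree dicts.
edgedict = {('a', 'c'): 1.0, ('b', 'c'): 2.0}
ndict = {'a': {'c'}, 'b': {'c'}, 'c': {'a', 'b'}}
params = {"deg": {'a': 1, 'b': 1, 'c': 2}}
score = compute_cw_score_normalized('a', 'b', edgedict, ndict, params)
# score == (1.0 + 2.0) / sqrt(1 * 1) == 3.0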
5,350,869
def prompt_user_friendly_choice_list(msg, a_list, default=1, help_string=None):
    """Prompt user to select from a list of possible choices.

    :param msg: A message displayed to the user before the choice list
    :type msg: str
    :param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
    :type a_list: list
    :param default: The default option that should be chosen if user doesn't enter a choice
    :type default: int
    :returns: The list index of the item chosen.
    """
    verify_is_a_tty()
    options = '\n'.join([' [{}] {}{}'
                         .format(i + 1,
                                 x['name'] if isinstance(x, dict) and 'name' in x else x,
                                 ' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '')
                         for i, x in enumerate(a_list)])
    allowed_vals = list(range(1, len(a_list) + 1))
    linesToDelete = len(a_list) + 1
    while True:
        val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default))
        if val == '?' and help_string is not None:
            for x in range(0, linesToDelete):
                delete_last_line()
            print('Please enter a choice [Default choice({})]: {}'.format(default, '?'))
            print(help_string)
            continue
        if not val:
            val = '{}'.format(default)
        try:
            ans = int(val)
            if ans in allowed_vals:
                for x in range(0, linesToDelete):
                    delete_last_line()
                print('Please enter a choice [Default choice({})]: {}'.format(default, a_list[ans - 1]))
                # array index is 0-based, user input is 1-based
                return ans - 1
            raise ValueError
        except ValueError:
            for x in range(0, linesToDelete):
                delete_last_line()
            print('Please enter a choice [Default choice({})]: {}'.format(default, val))
            logger.warning('Valid values are %s', allowed_vals)
5,350,870
def get_elapsed(df, monitored_field, prefix='elapsed_'):
    """
    Cumulative counting across a sorted dataframe.
    Given a particular field to monitor, this function will start tracking
    time since the last occurrence of that field. When the field is seen again,
    the counter is set to zero.

    Args:
        df (pd.DataFrame): A pandas DataFrame
        monitored_field (str): A string that is the name of the date column you wish to expand.
            Assumes the column is of type datetime64
        prefix (str): The prefix to add to the newly created field.
    """
    day1 = np.timedelta64(1, 'D')
    last_date = np.datetime64()
    last_store = 0
    res = []
    for s, v, d in zip(df["Store"].values, df[monitored_field].values, df["Date"].values):
        if s != last_store:
            last_date = np.datetime64()
            last_store = s
        if v:
            last_date = d
        res.append(((d - last_date).astype('timedelta64[D]') / day1).astype(int))
    df[prefix + monitored_field] = res
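# Usage sketch (not from the original source): a tiny frame sorted by Store and
# Date with a boolean-like column to monitor. The column names "Store" and
# "Date" are required by the implementation above.
import pandas as pd
demo = pd.DataFrame({
    "Store": [1, 1, 1],
    "Date": pd.to_datetime(["2015-01-01", "2015-01-02", "2015-01-03"]),
    "Promo": [1, 0, 0],
})
get_elapsed(demo, "Promo")
# demo now has an extra "elapsed_Promo" column counting days since the last
# row where "Promo" was truthy, here [0, 1, 2].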
5,350,871
def refresh_git_config_contexts(repository_record, job_result, delete=False):
    """Callback function for GitRepository updates - refresh all ConfigContext records managed by this repository."""
    if "extras.configcontext" in repository_record.provided_contents and not delete:
        update_git_config_contexts(repository_record, job_result)
    else:
        delete_git_config_contexts(repository_record, job_result)
5,350,872
def fuse(search: typing.Dict, filepath: str):
    """Build a JSON doc of your pages"""
    with open(filepath, "w") as jsonfile:
        return json.dump(
            [x for x in _build_index(search, id_field="id")],
            fp=jsonfile,
        )
5,350,873
def aggregate_gradients_using_copy_with_device_selection(
        tower_grads, avail_devices, use_mean=True, check_inf_nan=False):
    """Aggregate gradients, controlling device for the aggregation.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over towers. The inner list is over individual gradients.
        avail_devices: List of device names; the aggregation of each gradient is
            placed on these devices in round-robin order.
        use_mean: if True, mean is taken, else sum of gradients is taken.
        check_inf_nan: If true, check grads for nans and infs.

    Returns:
        The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
            gradient has been averaged across all towers. The variable is chosen from
            the first tower. The has_nan_or_inf indicates the grads has nan or inf.
    """
    agg_grads = []
    has_nan_or_inf_list = []

    for i, single_grads in enumerate(zip(*tower_grads)):
        with tf.device(avail_devices[i % len(avail_devices)]):
            grad_and_var, has_nan_or_inf = aggregate_single_gradient(
                single_grads, use_mean, check_inf_nan)
            agg_grads.append(grad_and_var)
            has_nan_or_inf_list.append(has_nan_or_inf)

    return agg_grads
5,350,874
def ToOrdinal(value):
    """
    Convert a numerical value into an ordinal number.

    @param value: the number to be converted
    """
    if value % 100 // 10 != 1:
        if value % 10 == 1:
            ordval = '{}st'.format(value)
        elif value % 10 == 2:
            ordval = '{}nd'.format(value)
        elif value % 10 == 3:
            ordval = '{}rd'.format(value)
        else:
            ordval = '{}th'.format(value)
    else:
        ordval = '{}th'.format(value)

    return ordval
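# Worked examples (illustrative only): the teens are special-cased by the
# `value % 100 // 10 != 1` check, so 11-13 get "th" rather than "st"/"nd"/"rd".
assert ToOrdinal(1) == '1st'
assert ToOrdinal(2) == '2nd'
assert ToOrdinal(11) == '11th'
assert ToOrdinal(23) == '23rd'
assert ToOrdinal(112) == '112th'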
5,350,875
def compute_partition(num_list: List[int]):
    """Compute partitions that add up."""
    solutions = []
    for bits in helper.bitprod(len(num_list)):
        iset = []
        oset = []
        for idx, val in enumerate(bits):
            (iset.append(num_list[idx]) if val == 0 else
             oset.append(num_list[idx]))
        if sum(iset) == sum(oset):
            solutions.append(bits)
    return solutions
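# Usage sketch (not from the original source): `helper.bitprod(n)` is assumed
# to yield every n-bit assignment (tuples of 0/1), so each returned bit pattern
# splits the input into two halves with equal sums.
balanced = compute_partition([3, 1, 1, 2, 2, 1])
# Every entry in `balanced` marks a subset (the 1-bits) summing to 5,
# e.g. a pattern putting {3, 2} on one side and {1, 1, 2, 1} on the other.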
5,350,876
def renumber(conllusent):
    """Fix non-contiguous IDs because of multiword tokens or removed tokens"""
    mapping = {line[ID]: n for n, line in enumerate(conllusent, 1)}
    mapping[0] = 0
    for line in conllusent:
        line[ID] = mapping[line[ID]]
        line[HEAD] = mapping[line[HEAD]]
    return conllusent
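# Usage sketch (not from the original source): ID and HEAD are module-level
# column indices into each token row; the values 0 and 1 below are purely
# hypothetical stand-ins chosen for this example. Token IDs 1, 3, 4 become the
# contiguous 1, 2, 3 and the HEAD references are remapped to match.
ID, HEAD = 0, 1  # hypothetical column positions for this illustration only
sent = [[1, 0], [3, 1], [4, 3]]
renumber(sent)
# sent == [[1, 0], [2, 1], [3, 2]]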
5,350,877
def assign_project_locale_group_permissions(sender, **kwargs):
    """
    Assign permissions group to a given ProjectLocale.
    """
    if kwargs["raw"] or not kwargs["created"]:
        return

    instance = kwargs["instance"]

    try:
        assign_group_permissions(
            instance, "translators", ["can_translate_project_locale"]
        )
    except ObjectDoesNotExist as e:
        errors.send_exception(e)
5,350,878
def test_human_gene_has_rgd_references_cross_reference():
    """Test Human Gene has RGD References Cross Reference"""
    query = """MATCH (g:Gene)--(cr:CrossReference)
               WHERE g.primaryKey = 'HGNC:11204'
                     AND cr.crossRefType = 'gene/references'
                     AND cr.globalCrossRefId = 'RGD:1322513'
                     AND cr.crossRefCompleteUrl = 'https://rgd.mcw.edu/rgdweb/report/gene/main.html?view=5&id=1322513'
               RETURN count(cr) AS counter"""
    result = execute_transaction(query)
    for record in result:
        assert record["counter"] == 1
5,350,879
def recall_at(target, scores, k):
    """Calculation for recall at k."""
    if target in scores[:k]:
        return 1.0
    else:
        return 0.0
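# Worked example (illustrative only): the target appears within the top-2
# ranked items, so recall@2 is 1.0 while recall@1 is 0.0.
assert recall_at('apple', ['pear', 'apple', 'plum'], k=2) == 1.0
assert recall_at('apple', ['pear', 'apple', 'plum'], k=1) == 0.0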
5,350,880
def supports_dynamic_state() -> bool:
    """Checks if the state can be displayed with widgets.

    :return: True if widgets available. False otherwise.
    """
    return widgets is not None
5,350,881
def create_partial_image_rdd_decoder(key_type):
    """Creates a partial, tuple decoder function.

    Args:
        key_type (str): The type of the key in the tuple.

    Returns:
        A partial :meth:`~geopyspark.protobufregistry.ProtoBufRegistry.image_rdd_decoder`
        function that requires ``proto_bytes`` to execute.
    """
    return partial(image_rdd_decoder, key_decoder=key_type)
5,350,882
def to_json(simple_object):
    """
    Serializes the ``simple_object`` to JSON using the EnhancedJSONEncoder above.
    """
    return json.dumps(simple_object, cls=EnhancedJSONEncoder)
5,350,883
def bootstrap_metadata():
    """ Provides cluster metadata which includes security modes """
    return _metadata_helper('bootstrap-config.json')
5,350,884
def unique_slug(*, title: str, new_slug: str = None) -> str:
    """Create unique slug.

    Args:
        title: The text from which the slug will be generated.
        new_slug: Custom slug to hard-code.

    Returns:
        The created slug or the hard-coded slug.
    """
    if new_slug is None:
        slug = slugify(title)
        new_slug = f"{slug}-{random_string()}"
    return new_slug
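# Usage sketch (not from the original source): `slugify` and `random_string`
# are assumed to be helpers imported elsewhere in this module.
unique_slug(title="Hello World")                          # e.g. "hello-world-x7f3k2" (random suffix)
unique_slug(title="Hello World", new_slug="fixed-slug")   # returns "fixed-slug" unchanged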
5,350,885
def dcg(r, k=None):
    """The Burges et al. (2005) version of DCG.

    This is what everyone uses (except trec_eval)
    :param r: results
    :param k: cut-off
    :return: sum (2^y_i - 1) / log(i + 2)
    """
    result = sum([(pow(2, rel) - 1) / math.log(rank + 2, 2)
                  for rank, rel in enumerate(r[:k])])
    return result
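# Worked example (illustrative only): for graded relevances [3, 2, 3] the sum is
# (2^3 - 1)/log2(2) + (2^2 - 1)/log2(3) + (2^3 - 1)/log2(4) = 7 + 1.89 + 3.5,
# so dcg([3, 2, 3, 0, 1], k=3) is approximately 12.39.
round(dcg([3, 2, 3, 0, 1], k=3), 2)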
5,350,886
async def detect_objects(computervision_client, image_url):
    """Detect objects from a remote image"""
    detect_objects_results_local = \
        computervision_client.detect_objects(image_url)
    return detect_objects_results_local.objects
5,350,887
def test_data_analyst_cannot_have_region(data_analyst, region):
    """Test that an error will be thrown if a region is set on a data analyst user."""
    with pytest.raises(ValidationError):
        data_analyst.region = region
        data_analyst.save()
5,350,888
def print_mystery(num=10):
    """
    Generates a set of suspects, their alibis and hair colours, and outputs
    the result.

    :``num``: The number of suspects. *Default 10*.
    """
    rooms = get_rooms(num)
    sl = SuspectList(num, rooms)

    print "The victim: %s, %s" % (sl.get_victim().get_name(),
                                  sl.get_victim().describe_hair())

    total_suspects = range(len(sl.suspects))
    total_suspects.remove(sl.victim)

    print_header("All suspects")
    for i in total_suspects:
        p = sl.get_suspect(i)
        print "%s, %s" % (p.get_name(), p.describe_hair())

    print "\nThe clue: a %s hair!" % sl.get_murderer().hair

    confirmed = sl.get_cleared_suspects()
    print_header("Confirmed alibis")
    sl.print_alibis(confirmed)

    unconfirmed = list(set(total_suspects) - set(confirmed))
    print_header("Unconfirmed alibis")
    sl.print_alibis(unconfirmed)

    print "\nThe murderer:", sl.get_murderer().get_name()
5,350,889
def batch_metrics_logger(run_id):
    """
    Context manager that yields a BatchMetricsLogger object, which metrics can be
    logged against. The BatchMetricsLogger keeps metrics in a list until it decides
    they should be logged, at which point the accumulated metrics will be batch logged.
    The BatchMetricsLogger ensures that logging imposes no more than a 10% overhead on
    the training, where the training is measured by adding up the time elapsed between
    consecutive calls to record_metrics.

    If logging a batch fails, a warning will be emitted and subsequent metrics will continue
    to be collected.

    Once the context is closed, any metrics that have yet to be logged will be logged.

    :param run_id: ID of the run that the metrics will be logged to.
    """
    batch_metrics_logger = BatchMetricsLogger(run_id)
    yield batch_metrics_logger
    batch_metrics_logger.flush()
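# Usage sketch (not from the original source): assumes the generator above is
# wrapped with contextlib.contextmanager, as the docstring implies, and that
# BatchMetricsLogger exposes a record_metrics(metrics, step) method; both are
# assumptions about the surrounding library, not shown in this snippet.
with batch_metrics_logger("run-123") as metrics_logger:
    for step in range(100):
        loss = 1.0 / (step + 1)  # stand-in for a real training step
        metrics_logger.record_metrics({"loss": loss}, step)
# Any metrics still buffered are flushed when the context exits.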
5,350,890
def check_for_command(command):
    """
    Ensure that the specified command is available on the PATH.
    """
    try:
        subprocess.check_call(['which', command])
    except subprocess.CalledProcessError as err:
        logging.error("Unable to find %s command", command)
        raise err
5,350,891
def split(endpoint_name, traffic_policy_dictionary):
    """Associate a service endpoint with traffic policy.

    Example:

    >>> serve.split("service-name", {
        "backend:v1": 0.5,
        "backend:v2": 0.5
    })

    Args:
        endpoint_name (str): A registered service endpoint.
        traffic_policy_dictionary (dict): a dictionary maps backend names
            to their traffic weights. The weights must sum to 1.
    """
    assert endpoint_name in expand(
        global_state.route_table.list_service(include_headless=True).values())

    assert isinstance(traffic_policy_dictionary,
                      dict), "Traffic policy must be dictionary"
    prob = 0
    for backend, weight in traffic_policy_dictionary.items():
        prob += weight
        assert (backend in global_state.backend_table.list_backends()
                ), "backend {} is not registered".format(backend)
    assert np.isclose(
        prob, 1, atol=0.02), "weights must sum to 1, currently it sums to {}".format(
            prob)

    global_state.policy_table.register_traffic_policy(
        endpoint_name, traffic_policy_dictionary)
    ray.get(global_state.init_or_get_router().set_traffic.remote(
        endpoint_name, traffic_policy_dictionary))
5,350,892
def rgb(r=None, g=None, b=None, smooth=True, force=True): """ Set RGB values with PWM signal :param r: red value 0-1000 :param g: green value 0-1000 :param b: blue value 0-1000 :param smooth: runs colors change with smooth effect :param force: clean fade generators and set color :return: verdict string """ def __buttery(r_from, g_from, b_from, r_to, g_to, b_to): step_ms = 2 interval_sec = 0.3 if Data.RGB_CACHE[3] == 0: # Turn from OFF to on (to colors) r_from, g_from, b_from = 0, 0, 0 Data.RGB_CACHE[3] = 1 r_gen = transition(from_val=r_from, to_val=r_to, step_ms=step_ms, interval_sec=interval_sec) g_gen = transition(from_val=g_from, to_val=g_to, step_ms=step_ms, interval_sec=interval_sec) b_gen = transition(from_val=b_from, to_val=b_to, step_ms=step_ms, interval_sec=interval_sec) for _r in r_gen: Data.RGB_OBJS[0].duty(_r) Data.RGB_OBJS[1].duty(g_gen.__next__()) Data.RGB_OBJS[2].duty(b_gen.__next__()) sleep_ms(step_ms) __RGB_init() if force and Data.FADE_OBJS[0]: Data.FADE_OBJS = (None, None, None) # Dynamic input handling: user/cache r = Data.RGB_CACHE[0] if r is None else r g = Data.RGB_CACHE[1] if g is None else g b = Data.RGB_CACHE[2] if b is None else b # Set RGB channels if smooth: __buttery(r_from=Data.RGB_CACHE[0], g_from=Data.RGB_CACHE[1], b_from=Data.RGB_CACHE[2], r_to=r, g_to=g, b_to=b) else: Data.RGB_OBJS[0].duty(int(r)) Data.RGB_OBJS[1].duty(int(g)) Data.RGB_OBJS[2].duty(int(b)) # Save channel duties if LED on if r > 0 or g > 0 or b > 0: Data.RGB_CACHE = [r, g, b, 1] else: Data.RGB_CACHE[3] = 0 # Save state machine (cache) __persistent_cache_manager('s') return status()
5,350,893
def visualize_relative_weight_ranges_model():
    """
    Code example for model visualization
    """
    visualization_url, process = start_bokeh_server_session(8002)

    model = models.resnet18(pretrained=True).to(torch.device('cpu'))
    model = model.eval()

    batch_norm_fold.fold_all_batch_norms(model, (1, 3, 224, 224))

    # Usually it is observed that if we do BatchNorm fold the layer's weight range increases.
    # This helps in finding layers which can be equalized to get better performance on hardware
    visualize_model.visualize_relative_weight_ranges_to_identify_problematic_layers(model, visualization_url)
5,350,894
def handle_config(args, configs):
    """Handle `config` subcommand

    :param args: parsed arguments
    :type args: `argparse.Namespace`
    :param configs: configurations object
    :type configs: ``sfftk.core.configs.Configs``
    :return int status: status
    """
    if args.config_subcommand == "get":
        from .core.configs import get_configs
        return get_configs(args, configs)
    elif args.config_subcommand == "set":
        from .core.configs import set_configs
        return set_configs(args, configs)
    elif args.config_subcommand == "del":
        from .core.configs import del_configs
        return del_configs(args, configs)
5,350,895
def bottom_up_amons_of(mol, max_size, already_generated=set()): """ Generates all unique amons of mol by growing graphs up to a max_size around each atom already_generated: A set of smiles strings of amons that have already been generated """ if mol.NumHvyAtoms() < 1: return obConversion.SetInAndOutFormats("smi", "can") #step 1: generate the data for the graph graph = {} for atom in ob.OBMolAtomIter(mol): a = atom.GetId() bonds = [] for bond in ob.OBAtomBondIter(atom): b1 = bond.GetBeginAtom().GetId() b2 = bond.GetEndAtom().GetId() if b1 != a: bonds.append(b1) else: bonds.append(b2) graph[a] = bonds #step 2: generate all subgraphs of max_size subgraphs = [] for atom in ob.OBMolAtomIter(mol): for subgraph in traverse(atom.GetId(),graph,max_size): if subgraph not in subgraphs: subgraphs.append(subgraph) #step 3: generate smiles for all subgraphs by deleting all other atoms for subgraph in subgraphs: #copy molecule copy = ob.OBMol(mol) #delete all atoms not in the subgraph for atom in ob.OBMolAtomIter(mol): #iterate over atoms of mol, NOT copy, because atoms are deleted in place idx = atom.GetId() if idx not in subgraph: #all atoms in subgraph are kept #increment implicit H counts of bonding partners for bond in ob.OBAtomBondIter(atom): a1 = bond.GetBeginAtom().GetId() a2 = bond.GetEndAtom().GetId() bo = bond.GetBondOrder() if a1 != idx: if a1 in subgraph: #only kept atoms need to be handled copy.GetAtomById(a1).SetImplicitHCount(copy.GetAtomById(a1).GetImplicitHCount()+bo) else: if a2 in subgraph: #only kept atoms need to be handled copy.GetAtomById(a2).SetImplicitHCount(copy.GetAtomById(a2).GetImplicitHCount()+bo) copy.DeleteAtom(copy.GetAtomById(idx)) #convert to smiles smiles = obConversion.WriteString(copy).strip() obConversion.ReadString(copy, smiles) #this is done to really get canonical smiles smiles = obConversion.WriteString(copy).strip() if smiles not in already_generated: already_generated.add(smiles) yield copy
5,350,896
def dump_config(output=None, **kwargs):
    """
    Dump current configuration to screen, useful for creating a new
    ``settings.cfg`` file

    Arguments:
        output (Optional[str]): output filename, stdout if None
    """
    conf = arguments.read_config(**kwargs)
    conf.write(output or sys.stdout)
5,350,897
def filter_safe_actions(
    action_shield: Dict[int, Dict[ActionData, int]], energy: int, bel_supp_state: int
) -> List[ActionData]:
    """Utility function to filter actions according to the energy they require, using the given action shield.

    Parameters
    ----------
    action_shield : Dict[int, Dict[ActionData, int]]
        Mapping from belief support state to a dictionary of actions and the
        minimum energy each of them requires.
    energy : int
        Available energy.
    bel_supp_state : int
        State in belief support cmdp to filter actions by.

    Returns
    -------
    List[ActionData]
        List of available actions for given energy and given belief support cmdp state.
    """
    return [
        action
        for action, min_energy in action_shield[bel_supp_state].items()
        if min_energy <= energy
    ]
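# Usage sketch (not from the original source): plain strings stand in for the
# project's ActionData objects, purely for illustration.
shield = {0: {"go_left": 3, "go_right": 7}, 1: {"wait": 1}}
filter_safe_actions(shield, energy=5, bel_supp_state=0)   # -> ["go_left"]
filter_safe_actions(shield, energy=8, bel_supp_state=0)   # -> ["go_left", "go_right"]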
5,350,898
def handle_embedded_annot_2(data): """ socketio Handler to aggregate original page metadata with sparql endpoints. emit the result of sparql requests @param data dict Contains the data needed to aggregate (url, etc). """ # step = 0 print("handle annot_2") sid = request.sid print(sid) RDF_TYPE[sid] = "turtle" uri = str(data["url"]) print("retrieving embedded annotations for " + uri) print("Retrieve KG for uri: " + uri) # page = requests.get(uri) # html = page.content # use selenium to retrieve Javascript genereted content html = util.get_html_selenium(uri) d = extruct.extract( html, syntaxes=["microdata", "rdfa", "json-ld"], errors="ignore" ) # remove whitespaces from @id values after axtruct for key, val in d.items(): for dict in d[key]: list(util.replace_value_char_for_key("@id", dict, " ", "_")) print(d) print("là") kg = ConjunctiveGraph() base_path = Path(__file__).parent # current directory static_file_path = str((base_path / "static/data/jsonldcontext.json").resolve()) for md in d["json-ld"]: if "@context" in md.keys(): print(md["@context"]) if ("https://schema.org" in md["@context"]) or ( "http://schema.org" in md["@context"] ): md["@context"] = static_file_path kg.parse(data=json.dumps(md, ensure_ascii=False), format="json-ld") for md in d["rdfa"]: if "@context" in md.keys(): if ("https://schema.org" in md["@context"]) or ( "http://schema.org" in md["@context"] ): md["@context"] = static_file_path kg.parse(data=json.dumps(md, ensure_ascii=False), format="json-ld") for md in d["microdata"]: if "@context" in md.keys(): if ("https://schema.org" in md["@context"]) or ( "http://schema.org" in md["@context"] ): md["@context"] = static_file_path kg.parse(data=json.dumps(md, ensure_ascii=False), format="json-ld") KGS[sid] = kg # step += 1 print(len(kg)) # emit('update_annot_2', step) emit("send_annot_2", str(kg.serialize(format=RDF_TYPE[sid]).decode()))
5,350,899