content : string (lengths 22 – 815k)
id : int64 (0 – 4.91M)
def pre_search(cs, term, planlove):
    """
    This hook is called before a (quicklove or regular) search, and is
    passed the same arguments as is the search function:

    - the search ``term``
    - ``planlove``, a boolean of whether to restrict search to planlove.
    """
    pass
5,350,600
def dequote(str):
    """Will remove single or double quotes from the start and end of a string
    and return the result."""
    quotechars = "'\""
    while len(str) and str[0] in quotechars:
        str = str[1:]
    while len(str) and str[-1] in quotechars:
        str = str[0:-1]
    return str
5,350,601
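A quick usage check for dequote above, using only what the snippet defines:

# Strips quote characters from both ends, leaving inner text untouched
assert dequote('"hello"') == 'hello'
assert dequote("'quoted'") == 'quoted'
assert dequote('plain') == 'plain'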
def uccsd_singlet_paramsize(n_qubits, n_electrons):
    """Determine number of independent amplitudes for singlet UCCSD

    Args:
        n_qubits(int): Number of qubits/spin-orbitals in the system
        n_electrons(int): Number of electrons in the reference state

    Returns:
        Number of independent parameters for singlet UCCSD with a single
        reference.
    """
    n_occupied = int(numpy.ceil(n_electrons / 2.))
    # Integer division keeps the orbital count an int under Python 3
    n_virtual = n_qubits // 2 - n_occupied

    n_single_amplitudes = n_occupied * n_virtual
    n_double_amplitudes = n_single_amplitudes ** 2

    return (n_single_amplitudes + n_double_amplitudes)
5,350,602
def test_bq27546_exists():
    """
    Command:
        ls -l /sys/class/power_supply/bq27546-0
    Results:
        Exit code 0 if valid
        Exit code 127 if no such file or directory
    """
    resp = run_command(
        ["ls", "-l", "/sys/class/power_supply/bq27546-0"],
        return_exit_values=True
    )
    assert resp == 0
5,350,603
def create_topic_rule_destination(destinationConfiguration=None): """ Creates a topic rule destination. The destination must be confirmed prior to use. See also: AWS API Documentation Exceptions :example: response = client.create_topic_rule_destination( destinationConfiguration={ 'httpUrlConfiguration': { 'confirmationUrl': 'string' } } ) :type destinationConfiguration: dict :param destinationConfiguration: [REQUIRED]\nThe topic rule destination configuration.\n\nhttpUrlConfiguration (dict) --Configuration of the HTTP URL.\n\nconfirmationUrl (string) -- [REQUIRED]The URL AWS IoT uses to confirm ownership of or access to the topic rule destination URL.\n\n\n\n\n :rtype: dict ReturnsResponse Syntax{ 'topicRuleDestination': { 'arn': 'string', 'status': 'ENABLED'|'IN_PROGRESS'|'DISABLED'|'ERROR', 'statusReason': 'string', 'httpUrlProperties': { 'confirmationUrl': 'string' } } } Response Structure (dict) -- topicRuleDestination (dict) --The topic rule destination. arn (string) --The topic rule destination URL. status (string) --The status of the topic rule destination. Valid values are: IN_PROGRESS A topic rule destination was created but has not been confirmed. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination . Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint. ENABLED Confirmation was completed, and traffic to this destination is allowed. You can set status to DISABLED by calling UpdateTopicRuleDestination . DISABLED Confirmation was completed, and traffic to this destination is not allowed. You can set status to ENABLED by calling UpdateTopicRuleDestination . ERROR Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination for details about the error. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination . Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint. statusReason (string) --Additional details or reason why the topic rule destination is in the current status. httpUrlProperties (dict) --Properties of the HTTP URL. confirmationUrl (string) --The URL used to confirm the HTTP topic rule destination URL. Exceptions IoT.Client.exceptions.InternalException IoT.Client.exceptions.InvalidRequestException IoT.Client.exceptions.ResourceAlreadyExistsException IoT.Client.exceptions.ServiceUnavailableException IoT.Client.exceptions.ConflictingResourceUpdateException :return: { 'topicRuleDestination': { 'arn': 'string', 'status': 'ENABLED'|'IN_PROGRESS'|'DISABLED'|'ERROR', 'statusReason': 'string', 'httpUrlProperties': { 'confirmationUrl': 'string' } } } """ pass
5,350,604
def variance(timeseries: SummarizerAxisTimeseries, param: dict):
    """
    Calculate the variance of the timeseries
    """
    v_mean = mean(timeseries)

    # Accumulate squared deviations from the mean
    v_variance = 0
    for ts, value in timeseries.values():
        v_variance += (value - v_mean) ** 2

    # Average over the number of samples
    sample_count = len(timeseries.values())
    if sample_count == 0:
        return 0
    return v_variance / sample_count
5,350,605
def mutate_bone_length(population, opt, gen_idx, method='simple'): """ Randomly mutate bone length in a population to increase variation in subject size. For example, H36M only contains adults yet you can modify bone length to represent children. Since the posture and subject size are independent, you can synthetize data for dancing kids for free if you already have data for dancing adults. You only need little prior knowledge on human bone length. """ # the camera parameters in H36M correspond to the five subjects # Rename the synthetic population as these subjects so that the camera # parameters can be used psuedo_subject_names = [1, 5, 6, 7, 8] dict_3d = {} for i in range(len(population)): if np.random.rand() > opt.MBLR: angles = position_to_angle(population[i].reshape(1, -1)) if method == 'simple': # The simplest way is to change to bone length to some value # according to prior knowledge about human bone size. # In our experiment, we collect these values manually from our # interactive visualization tool as well as cross validation. idx = np.random.randint(0, len(bl_templates)) angles[0, :, 0] = bl_templates[idx] population[i] = (angle_to_position(angles, population[i].reshape(1,-1))).reshape(-1) elif method == 'addnoise': # add Gaussian noise to current bone length to obtain new bone length raise ValueError('Deprecated') else: raise NotImplementedError poses_list = np.array_split(population, len(psuedo_subject_names)) for subject_idx in range(len(psuedo_subject_names)): dict_3d[(psuedo_subject_names[subject_idx], 'n/a', 'n/a')] =\ poses_list[subject_idx] save_path = get_save_path(opt, gen_idx) np.save(save_path, cast_to_float(dict_3d)) logging.info('file saved at ' + save_path) return
5,350,606
def test_radial_velocity1():
    """
    Put a central at (5, 5, 5) in a box WITHOUT periodic boundary conditions,
    with the central moving in the direction (3, 0, 0).

    Place all satellites at the point (6, 5, 5), moving in the direction
    (2, 0, 0), i.e., in the negative radial direction that is aligned with
    the x-dimension.

    Verify that we recover the correct radial velocity of -1 when ignoring PBCs
    """
    Lbox = np.inf
    xc, yc, zc = 5., 5., 5.
    vxc, vyc, vzc = 3., 0., 0.

    input_drad = 1.
    xs, ys, zs = xc + input_drad, yc, zc
    vxs, vys, vzs = 2., vyc, vzc
    input_vrad = vxs - vxc

    inferred_drad, inferred_vrad = radial_distance_and_velocity(
        xs, ys, zs, vxs, vys, vzs, xc, yc, zc, vxc, vyc, vzc, Lbox)
    assert np.allclose(inferred_drad, input_drad)
    assert np.allclose(inferred_vrad, input_vrad)
    assert input_vrad == -1
5,350,607
def setup_i2c_sensor(sensor_class, sensor_name, i2c_bus, errors):
    """ Initialise one of the I2C connected sensors, returning None on error."""
    if i2c_bus is None:
        # This sensor uses the multiplexer and there was an error initialising that.
        return None
    try:
        sensor = sensor_class(i2c_bus)
    except Exception as err:
        # Error initialising this sensor, try to continue without it.
        msg = "Error initialising {}:\n{}".format(sensor_name, err)
        print(msg)
        errors += (msg + "\n")
        return None
    else:
        print("{} initialised".format(sensor_name))
        return sensor
5,350,608
def argparse_textwrap_unwrap_first_paragraph(doc):
    """Join by single spaces all the leading lines up to the first empty line"""
    index = (doc + "\n\n").index("\n\n")
    lines = doc[:index].splitlines()
    chars = " ".join(_.strip() for _ in lines)
    alt_doc = chars + doc[index:]
    return alt_doc
5,350,609
def iterator(x, y, z, coeff, repeat, radius=0):
    """ compute an array of positions visited by recurrence relation """
    c_iterator.restype = ctypes.POINTER(ctypes.c_double * (3 * repeat))

    start = to_double_ctype(np.array([x, y, z]))
    coeff = to_double_ctype(coeff)
    out = to_double_ctype(np.zeros(3 * repeat))

    res = c_iterator(start, coeff, repeat, ctypes.c_double(radius), out).contents
    return np.array(res).reshape((repeat, 3)).T
5,350,610
def test_baked_django_docs_with_how_to_index(cookies):
    """Test Django docs how-to index template file has been generated correctly."""
    default_django = cookies.bake()

    index_path = default_django.project_path / "docs/source/how-tos/index-how-to.rst"
    index_file = index_path.read_text().splitlines()

    assert "See below for a list of How-To for Django Boilerplate." in index_file
5,350,611
def open_pep(
    search: str, base_url: str = BASE_URL, pr: int | None = None, dry_run: bool = False
) -> str:
    """Open this PEP in the browser"""
    url = pep_url(search, base_url, pr)
    if not dry_run:
        import webbrowser

        webbrowser.open_new_tab(url)
    print(url)
    return url
5,350,612
def default_build_component(): """ Builds the component artifacts and recipes based on the build system specfied in the project configuration. Based on the artifacts specified in the recipe, the built component artifacts are copied over to greengrass component artifacts build folder and the artifact uris in the recipe are updated to reflect the same. Based on the project config file, component recipe is updated and a new recipe file is created in greengrass component recipes build folder. Parameters ---------- None Returns ------- None """ try: # Build the project with specified build system run_build_command() # From the recipe, copy necessary artifacts (depends on build system) to the build folder . copy_artifacts_and_update_uris() # Update recipe file with component configuration from project config file. create_build_recipe_file() except Exception as e: raise Exception("""{}\n{}""".format(error_messages.BUILD_FAILED, e))
5,350,613
def random_sparse_matrix(n, n_add_elements_frac=None, n_add_elements=None, elements=(-1, 1, -2, 2, 10), add_elements=(-1, 1)): """Get a random matrix where there are n_elements.""" n_total_elements = n * n n_diag_elements = n frac_diag = 1. * n_diag_elements / n_total_elements if n_add_elements is not None and n_add_elements_frac is not None: raise ValueError("Should only set either n_add_elements or n_add_elements_frac") if n_add_elements_frac is not None: n_add_elements = int(round(n_add_elements_frac * n_total_elements)) assert n_add_elements_frac >= 0, n_add_elements_frac assert n_add_elements_frac <= 1 - frac_diag, n_add_elements_frac assert n_add_elements >= 0 assert n_add_elements <= n_total_elements - n_diag_elements A = np.zeros((n, n)) remaining = set(range(n)) # main elements for i in range(n): j = np.random.choice(list(remaining)) remaining.remove(j) A[i, j] = np.random.choice(list(elements)) # additional elements left_indices = np.array(list(zip(*np.where(A == 0.0)))) # print(left_indices) # print(A) np.random.shuffle(left_indices) assert len(left_indices) >= n_add_elements for i_add in range(n_add_elements): i, j = left_indices[i_add] assert A[i, j] == 0.0 A[i, j] = np.random.choice(list(add_elements)) return A
5,350,614
def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    :param val: Value to verify
    :type val: string
    :returns: bool

    .. versionchanged:: 1.1.1
       Support non-lowercase UUIDs.
    """
    try:
        return str(uuid.UUID(val)).replace("-", "") == _format_uuid_string(val)
    except (TypeError, ValueError, AttributeError):
        return False
5,350,615
def is_positive_integer(value: str) -> int:
    """
    Helper function for argparse.
    Raise an exception if value is not a positive integer.
    """
    int_value = int(value)
    if int_value <= 0:
        raise argparse.ArgumentTypeError("{} is not a positive integer".format(value))
    return int_value
5,350,616
def merge_overlapped_spans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
    """
    Merge overlapped spans

    Parameters
    ----------
    spans: input list of spans

    Returns
    -------
    merged spans
    """
    span_sets = list()
    for span in spans:
        span_set = set(range(span[0], span[1]))
        if not span_sets:
            span_sets.append(span_set)
        elif span_sets[-1] & span_set:
            if span_set - span_sets[-1]:
                span_sets[-1] = span_sets[-1] | span_set
        else:
            span_sets.append(span_set)
    merged_spans = list()
    for span_set in span_sets:
        merged_spans.append((min(span_set), max(span_set) + 1))
    return merged_spans
5,350,617
def network(dataframe, author_col_name, target_col_name, source_col_name=None): """ This function runs a Network analysis on the dataset provided. :param dataframe: DataFrame containing the data on which to conduct the activity analysis. It must contain at least an *author*, a *target* and a *source* column. :type dataframe: pandas.DataFrame :param author_col_name: Name of the column containing the authors of the entries. :type author_col_name: str :param target_col_name: Name of the column containing the targets of the relationship that the network analysis is supposed to exploring. :type target_col_name: str :param source_col_name: Name of the column containing the sources of the relationships that the network analysis is supposed to be exploring. :type source_col_name: str :return: Object of type network containing a *dataframe* field and a *graph* one. """ graph = _network_from_dataframe(dataframe, author_col_name, target_col_name, source_col_name) no_edges = [] for u, v, weight in graph.edges.data("weight"): if weight == 0: no_edges.append((u, v)) graph.remove_edges_from(no_edges) degrees = nx.degree_centrality(graph) nodes = pd.DataFrame.from_records([degrees]).transpose() nodes.columns = ["centrality"] return Network(nodes, graph)
5,350,618
def get_cert_and_update_domain( zappa_instance, lambda_name, api_stage, domain=None, manual=False, ): """ Main cert installer path. """ try: create_domain_key() create_domain_csr(domain) get_cert(zappa_instance) create_chained_certificate() with open("{}/signed.crt".format(gettempdir())) as f: certificate_body = f.read() with open("{}/domain.key".format(gettempdir())) as f: certificate_private_key = f.read() with open("{}/intermediate.pem".format(gettempdir())) as f: certificate_chain = f.read() if not manual: if domain: if not zappa_instance.get_domain_name(domain): zappa_instance.create_domain_name( domain_name=domain, certificate_name=domain + "-Zappa-LE-Cert", certificate_body=certificate_body, certificate_private_key=certificate_private_key, certificate_chain=certificate_chain, certificate_arn=None, lambda_name=lambda_name, stage=api_stage, ) print( "Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part." ) else: zappa_instance.update_domain_name( domain_name=domain, certificate_name=domain + "-Zappa-LE-Cert", certificate_body=certificate_body, certificate_private_key=certificate_private_key, certificate_chain=certificate_chain, certificate_arn=None, lambda_name=lambda_name, stage=api_stage, ) else: print("Cerificate body:\n") print(certificate_body) print("\nCerificate private key:\n") print(certificate_private_key) print("\nCerificate chain:\n") print(certificate_chain) except Exception as e: print(e) return False return True
5,350,619
def _ww3_ounp_contents(run_date, run_type): """ :param str run_type: :param run_date: :py:class:`arrow.Arrow` :return: ww3_ounp.inp file contents :rtype: str """ start_date = ( run_date.format("YYYYMMDD") if run_type == "nowcast" else run_date.shift(days=+1).format("YYYYMMDD") ) run_hours = {"nowcast": 24, "forecast": 36, "forecast2": 30} output_interval = 600 # seconds output_count = int(run_hours[run_type] * 60 * 60 / output_interval) contents = f"""$ WAVEWATCH III NETCDF Point output post-processing $ $ First output time (YYYYMMDD HHmmss), output increment (s), number of output times {start_date} 000000 {output_interval} {output_count} $ $ All points defined in ww3_shel.inp -1 $ File prefix $ number of characters in date $ netCDF4 output $ one file, max number of points to process $ tables of mean parameters $ WW3 global attributes $ time,station dimension order $ WMO standard output SoG_ww3_points_ 8 4 T 100 2 0 T 6 """ return contents
5,350,620
def combine_votes(votes_files, to_prediction, to_file, method=0, prediction_info=NORMAL_FORMAT, input_data_list=None, exclude=None): """Combines the votes found in the votes' files and stores predictions. votes_files: should contain the list of file names to_prediction: is the Model method that casts prediction to numeric type if needed to_file: is the name of the final output file. """ votes = read_votes(votes_files, to_prediction) u.check_dir(to_file) with UnicodeWriter(to_file) as output: number_of_tests = len(votes) if input_data_list is None or len(input_data_list) != number_of_tests: input_data_list = None for index in range(0, number_of_tests): multivote = votes[index] input_data = (None if input_data_list is None else input_data_list[index]) write_prediction(multivote.combine(method, full=True), output, prediction_info, input_data, exclude)
5,350,621
def EMLP(rep_in,rep_out,group,ch=384,num_layers=3): """ Equivariant MultiLayer Perceptron. If the input ch argument is an int, uses the hands off uniform_rep heuristic. If the ch argument is a representation, uses this representation for the hidden layers. Individual layer representations can be set explicitly by using a list of ints or a list of representations, rather than use the same for each hidden layer. Args: rep_in (Rep): input representation rep_out (Rep): output representation group (Group): symmetry group ch (int or list[int] or Rep or list[Rep]): number of channels in the hidden layers num_layers (int): number of hidden layers Returns: Module: the EMLP objax module.""" logging.info("Initing EMLP (Haiku)") rep_in =rep_in(group) rep_out = rep_out(group) # Parse ch as a single int, a sequence of ints, a single Rep, a sequence of Reps if isinstance(ch,int): middle_layers = num_layers*[uniform_rep(ch,group)] elif isinstance(ch,Rep): middle_layers = num_layers*[ch(group)] else: middle_layers = [(c(group) if isinstance(c,Rep) else uniform_rep(c,group)) for c in ch] # assert all((not rep.G is None) for rep in middle_layers[0].reps) reps = [rep_in]+middle_layers # logging.info(f"Reps: {reps}") network = Sequential( *[EMLPBlock(rin,rout) for rin,rout in zip(reps,reps[1:])], Linear(reps[-1],rep_out) ) return network
5,350,622
def decompress(data):
    """
    Decompress data in one shot.
    """
    return GzipFile(fileobj=BytesIO(data), mode='rb').read()
5,350,623
def str_to_rgb(arg):
    """Convert an rgb string 'rgb(x,y,z)' to a list of ints [x,y,z]."""
    return list(
        map(int, re.match(r'rgb\((\d+),\s*(\d+),\s*(\d+)\)', arg).groups())
    )
5,350,624
def divide_tarball(tarball: tarData, num: int) -> None: """Subdivide 'tarball' into 'num' tarballs. Will create 'num' new tarballs in current directory. Each tarball will have this format: "1.tar", "2.tar", etc. """ #TODO, direct output tarballs to tars/ dir # refactor to return something that can be saved outside of this function? # not easily testable. print("Gathering archives...") #Member list main_tar = tarfile.open(tarball, "r") names = main_tar.getmembers() file_count = len(names) #Determine fair amount new_amount = file_count // num remainder = file_count % num #Divide the bulk of the archives name_counter = 0 for file_ in range(1, num+1): name = f"tars/{str(file_)}.tar" new_tar = tarfile.open(name, mode="a:") #Put the new_amount of archives into the new tarball for new_file in range(new_amount): print(f"Subdividing the tarball: {name_counter}/{file_count}", end="\r", flush=True) archive_name = names[name_counter] name_counter += 1 data = main_tar.extractfile(archive_name) extracted = data.read() #add the extracted archive into the new tarball new_tar.addfile(archive_name, io.BytesIO(extracted)) new_tar.close() #Divide the remainders for file_ in range(1, remainder+1): name = str(file_)+".tar" new_tar = tarfile.open(name, mode="a:") archive_name = names[name_counter] name_counter += 1 data = main_tar.extractfile(archive_name) extracted = data.read() #add the extracted archive into the new tarball new_tar.addfile(archive_name, io.BytesIO(extracted)) new_tar.close() main_tar.close() print("Dividing archives finished.")
5,350,625
def vshift(x, shifts=0):
    """shift batch of images vertically"""
    return paddle.roll(x, int(shifts*x.shape[2]), axis=2)
5,350,626
def index():
    """Shows book titles and descriptions"""
    tagid = request.query.tagid
    books = []
    if tagid:
        try:
            tag = Tag.get(tagid)
            books = tag.books.all()
        except Tag.DoesNotExist:
            pass
    if not books:
        books = Book.all().order_by("title")
    return dict(books=books)
5,350,627
def delete_file(sender, instance, **kwargs):
    """
    This function deletes the associated file from the file storage
    when its instance is deleted from the database
    """
    file_path = instance._file.path
    if os.path.exists(file_path):
        os.remove(file_path)
5,350,628
def _GetExternalDataConfig(file_path_or_simple_spec, use_avro_logical_types=False, parquet_enum_as_string=False, parquet_enable_list_inference=False): """Returns a ExternalDataConfiguration from the file or specification string. Determines if the input string is a file path or a string, then returns either the parsed file contents, or the parsed configuration from string. The file content is expected to be JSON representation of ExternalDataConfiguration. The specification is expected to be of the form schema@format=uri i.e. schema is separated from format and uri by '@'. If the uri itself contains '@' or '=' then the JSON file option should be used. "format=" can be omitted for CSV files. Raises: UsageError: when incorrect usage or invalid args are used. """ maybe_filepath = os.path.expanduser(file_path_or_simple_spec) if os.path.isfile(maybe_filepath): try: with open(maybe_filepath) as external_config_file: return yaml.safe_load(external_config_file) except yaml.error.YAMLError as e: raise app.UsageError( ('Error decoding YAML external table definition from ' 'file %s: %s') % (maybe_filepath, e)) else: source_format = 'CSV' schema = None connection_id = None error_msg = ('Error decoding external_table_definition. ' 'external_table_definition should either be the name of a ' 'JSON file or the text representation of an external table ' 'definition. Given:%s') % ( file_path_or_simple_spec) parts = file_path_or_simple_spec.split('@') if len(parts) == 1: # Schema and connection are not specified. format_and_uri = parts[0] elif len(parts) == 2: # when there are 2 components, it can be: # 1. format=uri@connection_id.e.g csv=gs://bucket/[email protected] # 2. schema@format=uri e.g.col1::INTEGER@csv=gs://bucket/file # if the first element is format=uri, then second element is connnection. # Else, the first is schema, second is format=uri. if parts[0].find('://') >= 0: # format=uri and connection specified. format_and_uri = parts[0] connection_id = parts[1] else: # Schema and format=uri are specified. schema = parts[0] format_and_uri = parts[1] elif len(parts) == 3: # Schema and connection both are specified schema = parts[0] format_and_uri = parts[1] connection_id = parts[2] else: raise app.UsageError(error_msg) separator_pos = format_and_uri.find('=') if separator_pos < 0: # Format is not specified uri = format_and_uri else: source_format = format_and_uri[0:separator_pos] uri = format_and_uri[separator_pos + 1:] if not uri: raise app.UsageError(error_msg) # When using short notation for external table definition # autodetect is always performed. return _CreateExternalTableDefinition( source_format, uri, schema, True, connection_id, use_avro_logical_types=use_avro_logical_types, parquet_enum_as_string=parquet_enum_as_string, parquet_enable_list_inference=parquet_enable_list_inference)
5,350,629
def _linux_iqn():
    """
    Return iSCSI IQN from a Linux host.
    """
    ret = []

    initiator = "/etc/iscsi/initiatorname.iscsi"
    try:
        with salt.utils.files.fopen(initiator, "r") as _iscsi:
            for line in _iscsi:
                line = line.strip()
                if line.startswith("InitiatorName="):
                    ret.append(line.split("=", 1)[1])
    except OSError as ex:
        if ex.errno != errno.ENOENT:
            log.debug("Error while accessing '%s': %s", initiator, ex)

    return ret
5,350,630
def getAndSaveDocuments(base_url, delay=None): """Get and save meta and object XML from node :param delay: Delay, in seconds, between getting documents. :output Subdirectories of the folder `result`, in the form of `result/{NODE_IDENTIFIER}/{INDEX}-{meta-object}.xml` """ sampled_documents_filepath = getScriptDirectory() + "/result/sampled_documents.csv" # Check if sample exists if not os.path.isfile(sampled_documents_filepath): print "getAndSaveDocuments() was called but sampled_documents.csv doesn't exist." return # Get and save each document in the sample documents = pandas.read_csv(sampled_documents_filepath) nodes = getNodeList(base_url) formats = getFormatList(base_url) print("Total sampled documents to save: %d" % documents.shape[0]) for i in range(0, documents.shape[0]): print "[%d of %d]" % (i + 1, documents.shape[0]) node_identifier = documents['authoritativeMN'][i] # Get the meta and object XML document_identifier = documents['identifier'][i] meta_xml = getIdentifierMetaXML(base_url, document_identifier) # Determine if the node identifier is in the Node list. # If not, it is an invalid node id, and should be replaced with # the authoritativeMN from the system metadata valid_node = True if (node_identifier not in nodes): valid_node = False if meta_xml is not None: node_id_element = meta_xml.find("./authoritativeMN") if node_id_element is not None: node_identifier = node_id_element.text # Remove "urn:node:" from node_identifier # # This remove redundant text from the folder names # but also deals with how Mac OS handles colons in file paths. # Mac OS considers colons (:) to separate folders in a file # hierarchy so ./result/urn:node:foo will be shown in Cocoa apps as # ./result/urn/node/foo where urn/node/foo is the folder name. # This is confusing because the folder appears with colons when viewed # from the terminal. This fixes removes the ambiguity between the terminal # and Cocoa applications. node_short_identifier = node_identifier.split(":") node_short_identifier = node_short_identifier[len(node_short_identifier) - 1] # Make the subdirectories to store files subdirectory_path = getScriptDirectory() + "/result/" + node_short_identifier # Don't get metadata again if directory exists for identifier if not os.path.exists(subdirectory_path): os.makedirs(subdirectory_path) if delay is not None: time.sleep(delay) # Extract the formatId from the sysmeta format_path = None if meta_xml is not None: format_id_element = meta_xml.find("./formatId") if format_id_element is not None: format_path = formats[format_id_element.text]['formatPath'] if format_path is None: print "\t\tFailed to extract metadata format from system metadata file. Continuing." continue object_xml = getIdentifierObjectXML(base_url, document_identifier) if delay is not None: time.sleep(delay) sysmeta_path = subdirectory_path + "/sysmeta/xml" if not os.path.exists(sysmeta_path): os.makedirs(sysmeta_path) if meta_xml is not None: ET.ElementTree(meta_xml).write(sysmeta_path + "/" + str(i).rjust(5, '0') + "-sysmeta.xml") metadata_path = subdirectory_path + "/" + format_path + "/xml" if not os.path.exists(metadata_path): os.makedirs(metadata_path) if object_xml is not None: ET.ElementTree(object_xml).write(metadata_path + "/" + str(i).rjust(5, '0') + "-metadata.xml")
5,350,631
def generate_file_storage_name(file_uri: str, suffix: str) -> str:
    """
    Generate a filename using the hash of the file contents and some provided suffix.

    Parameters
    ----------
    file_uri: str
        The URI to the file to hash.
    suffix: str
        The suffix to append to the hash as a part of the filename.

    Returns
    -------
    dst: str
        The name of the file as it should be on Google Cloud Storage.
    """
    hash = hash_file_contents(file_uri)
    return f"{hash}-{suffix}"
5,350,632
def mullerlyer_parameters(illusion_strength=0, difference=0, size_min=0.5, distance=1): """Compute Parameters for Müller-Lyer Illusion. Parameters ---------- illusion_strength : float The strength of the arrow shapes in biasing the perception of lines of unequal lengths. A positive sign represents the bottom arrows pointing outwards and upper arrows pointing inwards. A negative sign represents the bottom arrows pointing inwards and upper arrows pointing outwards. difference : float The objective length difference of the horizontal lines. Specifically, the real difference of upper horizontal line relative to the lower horizontal line. E.g., if ``difference=1``, the upper line will be 100% longer, i.e., 2 times longer than the lower line. A negative sign reflects the converse, where ``difference=-1`` will result in the lower line being 100% longer than the upper line. size_min : float Length of lower horizontal line. distance : float Distance between the upper and lower horizontal lines. Returns ------- dict Dictionary of parameters of the Müller-Lyer illusion. """ parameters = _ponzo_parameters_topbottom(difference=difference, size_min=size_min, distance=distance) length = size_min/2 if difference >= 0: angle = {"Top": -illusion_strength, "Bottom": illusion_strength} else: angle = {"Top": illusion_strength, "Bottom": -illusion_strength} for which in ["Top", "Bottom"]: for side in ["Left", "Right"]: if side == "Left": coord, _, _ = _coord_line(x1=parameters[which + "_x1"], y1=parameters[which + "_y1"], length=length, angle=angle[which]) else: coord, _, _ = _coord_line(x1=parameters[which + "_x2"], y1=parameters[which + "_y2"], length=length, angle=-angle[which]) x1, y1, x2, y2 = coord for c in ["1", "2"]: parameters["Distractor_" + which + side + c + "_x1"] = x1 parameters["Distractor_" + which + side + c + "_y1"] = y1 parameters["Distractor_" + which + side + c + "_x2"] = x2 if c == "1": parameters["Distractor_" + which + side + c + "_y2"] = y2 else: parameters["Distractor_" + which + side + c + "_y2"] = y2 - 2 * (y2 - y1) parameters.update({"Illusion": "MullerLyer", "Illusion_Strength": illusion_strength, "Illusion_Type": "Congruent" if illusion_strength > 0 else "Incongruent", "Distractor_Length": length}) return parameters
5,350,633
def filter_for_corsi(pbp):
    """
    Filters given dataframe for goal, shot, miss, and block events

    :param pbp: a dataframe with column Event

    :return: pbp, filtered for corsi events
    """
    return filter_for_event_types(pbp, {'Goal', 'Shot', 'Missed Shot', 'Blocked Shot'})
5,350,634
def challenges(ctx):
    """
    Challenges and related options.
    """
    if ctx.invoked_subcommand is None:
        welcome_text = """Welcome to the EvalAI CLI. Use evalai challenges --help for viewing all the options."""
        echo(welcome_text)
5,350,635
def plot_spectra( spectra: Sequence[Spectrum], style: str, ax: Axes, labels: ITER_STR = None, colors: ITER_STR = None, alphas: ITER_FLOAT = None, markers: ITER_STR = None, linestyles: ITER_STR = None, linewidths: ITER_FLOAT = None, peaks: dict | bool = False, ): """ Plot Spectra on an axis. :param spectra: the Spectra to be plotted :param ax: the axis on which to plot :param style: the plot style :param labels: labels for the spectra, if None, generates based on the spectrum name :param colors: the colors to use :param alphas: transparency settings to use :param markers: the markers to use at each point on the plot :param linestyles: the styles of line to use :param linewidths: the widths of line to use :param peaks: peak highlighting parameters """ properties = (labels, colors, alphas, markers, linestyles, linewidths) for spectrum, label, color, alpha, marker, linestyle, linewidth in zip(spectra, *map(cycle_values, properties)): plot_spectrum( spectrum, style, ax, label=label, color=color, marker=marker, linestyle=linestyle, linewidth=linewidth, alpha=alpha, peaks=peaks, )
5,350,636
def upload(msg: Dict, public_key: bytes, ipns_keypair_name: str = '') -> Tuple[str, str]: """Upload encrypted string to IPFS. This can be manifest files, results, or anything that's been already encrypted. Optionally pins the file to IPNS. Pass in the IPNS key name To get IPNS key name, see create_new_ipns_link Args: msg (Dict): The message to upload and encrypt. public_key (bytes): The public_key to encrypt the file for. ipns_keypair_name (str): If left blank, then don't pin to IPNS Returns: Tuple[str, str]: returns [sha1 hash, ipfs hash] Raises: Exception: if adding bytes with IPFS fails. >>> credentials = { ... "gas_payer": "0x1413862C2B7054CDbfdc181B83962CB0FC11fD92", ... "gas_payer_priv": "28e516f1e2f99e96a48a23cea1f94ee5f073403a1c68e818263f0eb898f1c8e5" ... } >>> pub_key = b"2dbc2c2c86052702e7c219339514b2e8bd4687ba1236c478ad41b43330b08488c12c8c1797aa181f3a4596a1bd8a0c18344ea44d6655f61fa73e56e743f79e0d" >>> job = Job(credentials=credentials, escrow_manifest=manifest) >>> (hash_, manifest_url) = upload(job.serialized_manifest, pub_key) >>> manifest_dict = download(manifest_url, job.gas_payer_priv) >>> manifest_dict == job.serialized_manifest True """ try: manifest_ = json.dumps(msg, sort_keys=True) except Exception as e: LOG.error("Can't extract the json from the dict") raise e hash_ = hashlib.sha1(manifest_.encode('utf-8')).hexdigest() try: ipfs_file_hash = IPFS_CLIENT.add_bytes(_encrypt(public_key, manifest_)) except Exception as e: LOG.warning("Adding bytes with IPFS failed because of: {}".format(e)) raise e if ipns_keypair_name != '': try: # publish ipns ... docs: https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/http_client_ref.html#ipfshttpclient.Client.name IPFS_CLIENT.name.publish( f'/ipfs/{ipfs_file_hash}', key=ipns_keypair_name.lower(), allow_offline=True) except Exception as e: LOG.warning("IPNS failed because of: {}".format(e)) raise e return hash_, ipfs_file_hash
5,350,637
async def test_prefer_master():
    """
    If we ask the discoverer to prefer_master it should return a master node
    before returning a replica.
    """
    discoverer = get_discoverer(None, None, "10.0.0.1", 2113, prefer_master)
    gossip = data.make_gossip("10.0.0.1", "10.0.0.2")

    with aioresponses() as mock:
        mock.get("http://10.0.0.1:2113/gossip", payload=gossip)

        assert await discoverer.discover() == NodeService("10.0.0.1", 1113, None)
5,350,638
def test_add_key_fails_bad_key(): """Test that 'aea add-key' fails because the key is not valid.""" oldcwd = os.getcwd() runner = CliRunner() agent_name = "myagent" with tempfile.TemporaryDirectory() as tmpdir: with mock.patch.object(aea.crypto.helpers.logger, "error") as mock_logger_error: os.chdir(tmpdir) result = runner.invoke(cli, [*CLI_LOG_OPTION, "create", agent_name]) assert result.exit_code == 0 os.chdir(Path(tmpdir, agent_name)) # create an empty file - surely not a private key pvk_file = "this_is_not_a_key.pem" Path(pvk_file).touch() result = runner.invoke(cli, [*CLI_LOG_OPTION, "add-key", DEFAULT, pvk_file]) assert result.exit_code == 1 mock_logger_error.assert_called_with("This is not a valid private key file: '{}'".format(pvk_file)) # check that no key has been added. f = open(Path(DEFAULT_AEA_CONFIG_FILE)) expected_json = yaml.safe_load(f) config = AgentConfig.from_json(expected_json) assert len(config.private_key_paths.read_all()) == 0 os.chdir(oldcwd)
5,350,639
def test_sakai_auth_url(oauth_mock): """ Test auth url retrieval for Sakai. Test that we can retrieve a formatted Oauth1 URL for Sakai """ def mock_fetch_token(mock_oauth_token, mock_oauth_token_secret): def mock_token_getter(mock_url): return { 'oauth_token': mock_oauth_token, 'oauth_token_secret': mock_oauth_token_secret, } return mock_token_getter mock_authorize_url = 'http://host/oauth-tool/authorize/' another_mock = MagicMock() another_mock.fetch_request_token.side_effect = mock_fetch_token( fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_TOKEN'], fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_SECRET'], ) oauth_mock.return_value = another_mock data = { 'request_token_url': 'http://host/oauth-tool/request_tokén', 'authorize_url': mock_authorize_url, 'callback_url': "http://this.doesnt.ma/tter", } headers = fixtures.get_mocked_headers('http://somebaseurl') del headers['HTTP_LMS_OAUTH_TOKEN'] del headers['HTTP_LMS_OAUTH_SECRET'] client = Client() resp = client.get( reverse('auth_url'), content_type='application/json', data=data, **headers, ) expected_auth_url = ( f'{mock_authorize_url}' f'?oauth_token={fixtures.oauth_creds_dict["HTTP_LMS_OAUTH_TOKEN"]}' ) assert resp.status_code == status.HTTP_200_OK actual_resp_json = resp.json() expected_resp_json = { 'auth_url': expected_auth_url, 'redirect_key': 'redirect_uri', 'oauth_token_secret': fixtures.oauth_creds_dict[ 'HTTP_LMS_OAUTH_SECRET' ], } assert actual_resp_json == expected_resp_json
5,350,640
def main(test_package_path, test_package, args): """Command line dialogs for creating a new file This method checks ``args`` for optional arguments for each of its prompts. If these are set to something other than ``None``, their corresponding input prompts will be skipped unless validation for that parameter fails. ``type``, ``module_name``, and ``class_name`` are the 3 values required to create a new file. If these are all set to something other than ``None``, this method will default to an empty ``description`` unless one is provided. ``force`` is the only optional parameter that does not have a prompt. It will default to ``False`` unless the ``--force`` flag is used when calling this method. The ``new page`` command has additional optional arguments ``--prototype`` and ``--yaml``/``--no-yaml`` (depending on the configuration of ``ProjectFilesConfig.ENABLE_PAGE_OBJECT_YAML``). Prompt for ``--prototype`` will not be shown if ``type``, ``module_name``, and ``class_name`` are all set to something other than ``None``. Instead, this method will use the standard page object template unless one is specified with ``prototype``. Currently there is no prompt for the ``--yaml``/``--no-yaml`` arguments, so the value of ``ProjectFilesConfig.ENABLE_PAGE_OBJECT_YAML`` will always be used unless ``--yaml``/``--no-yaml`` is specified. :param test_package_path: The root directory of the test package :param test_package: The python package name of the test package :param args: Parsed arguments for the ``new`` command """ new_file_start = False # Get common items from args # (Using getattr() with default values because some attributes might not be # present if args.type wasn't specified) file_type = getattr(args, 'type', None) module_name = getattr(args, 'module_name', None) class_name = getattr(args, 'class_name', None) description = getattr(args, 'description', None) force = getattr(args, 'force', False) # module and class names are the minimum required args, will ignore # optional prompts if this is True minimum_required_args = module_name and class_name try: # if module_name and class_name are set, use defaults for optional arguments if minimum_required_args and description is None: description = '' _validate_file_type = cmd.validate_choice( [new_file.TEST_TYPE, new_file.PAGE_TYPE], shorthand_choices={'t': new_file.TEST_TYPE, 'p': new_file.PAGE_TYPE} ) validated_file_type = cmd.prompt( '[t]est/[p]age', 'Create a new test case or page object?', validate=_validate_file_type, parsed_input=file_type ) validated_module_name = cmd.prompt( 'Module file name', 'Enter a file name for the new {} module'.format(validated_file_type), validate=cmd.validate_module_name, parsed_input=module_name ) class_type = 'test case' if validated_file_type == 'test' else 'page object' validated_class_name = cmd.prompt( '{} class name'.format(class_type.capitalize()), 'Enter a name for the initial {} class'.format(class_type), validate=cmd.validate_class_name, parsed_input=class_name ) validated_description = cmd.prompt( 'Description', '(Optional) Enter description of the new {} class'.format(class_type), validate=validate_description, default='', parsed_input=description ) # Arguments for page-specific prompts kwargs = {} if validated_file_type == new_file.PAGE_TYPE: prototype = getattr(args, 'prototype', None) use_yaml = getattr(args, 'use_yaml', config.ProjectFilesConfig.ENABLE_PAGE_OBJECT_YAML) if prototype is None and minimum_required_args: prototype = '' _prototype_choices = [name for name in new_file.PROTOTYPE_NAMES] # 
Allow for numeric shorthand answers (starting at 1) _prototype_shorthands = { str(ind + 1): choice for ind, choice in enumerate(_prototype_choices) } # Allow empty string since this is an optional parameter _prototype_choices.append('') _validate_prototype = cmd.validate_choice( _prototype_choices, shorthand_choices=_prototype_shorthands ) kwargs['prototype'] = cmd.prompt( 'Page object prototype', '(Optional) Select a page object prototype to subclass:', *[cmd.INDENT + '[{}] {}'.format(i, name) for i, name in _prototype_shorthands.items()], validate=_validate_prototype, default='', parsed_input=prototype ) # TODO: Add prompt if class supports it (will need to change arg default to None and pass config_module as param) kwargs['use_yaml'] = use_yaml # Start file creation new_file_start = True new_file_paths = new_file.new_file( test_package_path, test_package, file_type=validated_file_type, module_name=validated_module_name, class_name=validated_class_name, description=validated_description, force=force, **kwargs ) # Output new file path on success # TODO: Custom success messages based on type? E.g. instructions on filling out YAML file? success_msg = '\nFile' + ('s' if len(new_file_paths) > 1 else '') + ' created.' print(cmd.COLORS['success'](success_msg)) for new_file_path in new_file_paths: print(new_file_path) except KeyboardInterrupt: print('') if new_file_start: msg = 'File creation was cancelled mid-operation.' print(cmd.COLORS['warning'](msg)) sys.exit()
5,350,641
def cteRoster(*args, **keywords):
    """ Dynamic library stub function """
    pass
5,350,642
def nodes_and_groups(expr: Expression) -> Tuple[List[Expression], Iterable[List[int]]]: """ Returns a list of all sub-expressions, and an iterable of lists of indices to sub-expressions that are equivalent. Example 1: (let (x 3) add ( (let (z 3) (add z (add x x))) (let (z 5) (add z (add x x))) ) ) Here, the two identical expressions '(add x x)' will be in one equivalence group (the closest binder for the free variable 'x' is the same). The four (single-node) sub-expressions 'x' will also be in one equivalence group. Example 2: In expression: (foo (let (x 3) (add x x)) # 1 (let (x 4) (add x x)) # 2 (let (y 3) (add y y)) # 3 ) - sub-expressions '(let (x 3) (add x x))' and '(let (y 3) (add y y))' are equivalent. - The sub-expressions `(add x x)` on line #1 and `(add y y)` on line #3 will not be in equivalence group, because they are in a different binding scope, even though they will evaluate to the same value. - '(let (x 3) (add x x))' and '(let (x 4) (add x x))' are not equivalent, because 'x' is assigned a different value. Also, for each 'add' expression, the pair of identical variables within it will, of course, be in an equivalence group. Args: expr: An expression Returns: A tuple of: * a list of subtrees (nodes) of the Expression; the same as expr.nodes, but returned to avoid an extra traversal (and more clearly corresponding to the second element as they are constructed by the same algorithm) * an iterable of lists of indices, where each list contains indices of nodes which are equivalent (compute the same value). Note that nodes that are not in """ nodes: List[Expression] = [] closest_binders: List[int] = [] def traverse(subexp: Expression, binder_stack: List[Tuple[str, int]]) -> None: idx = len(nodes) nodes.append(subexp) # Calculate the closest binder of a free-variable - intuitively, the scope of the subexp, # the highest point to which a let containing this subexp's value could be lifted. # (That is - this subexp cannot be the same as any other subexp unless their closest binder's are the same) closest_binder = -1 # Global for skip, (bv_name, binder_idx) in enumerate(reversed(binder_stack)): if bv_name in subexp.free_var_names: closest_binder = binder_idx if skip > 0 and len(subexp.children) > 0: binder_stack = binder_stack[:-skip] break closest_binders.append(closest_binder) if subexp.is_binder: bound_stack = binder_stack + [(subexp.bound_var.name, idx)] for i, c in enumerate(subexp.children): traverse( c, bound_stack if subexp.is_binder and subexp.binds_in_child(i) else binder_stack, ) traverse(expr, []) assert len(nodes) == expr.num_nodes assert len(closest_binders) == expr.num_nodes def equiv_groups() -> Iterable[List[int]]: # Group node indices by whether they have the same closest binder, same number of nodes, and are the same op. for g in utils.group_by( range(len(nodes)), lambda idx: (closest_binders[idx], nodes[idx].num_nodes, nodes[idx].op), ).values(): # Skip obviously-singleton groups if len(g) >= 2: yield from utils.group_by(g, lambda idx: nodes[idx]).values() return nodes, equiv_groups()
5,350,643
def masterxprv_from_electrummnemonic(mnemonic: Mnemonic,
                                     passphrase: str = "",
                                     network: str = 'mainnet') -> bytes:
    """Return BIP32 master extended private key from Electrum mnemonic.

    Note that for a 'standard' mnemonic the derivation path is "m",
    for a 'segwit' mnemonic it is "m/0h" instead.
    """
    version, seed = electrum._seed_from_mnemonic(mnemonic, passphrase)
    prefix = _NETWORKS.index(network)

    if version == 'standard':
        xversion = _XPRV_PREFIXES[prefix]
        return rootxprv_from_seed(seed, xversion)
    elif version == 'segwit':
        xversion = _P2WPKH_PRV_PREFIXES[prefix]
        rootxprv = rootxprv_from_seed(seed, xversion)
        return derive(rootxprv, 0x80000000)  # "m/0h"
    else:
        raise ValueError(f"Unmanaged electrum mnemonic version ({version})")
5,350,644
def test_getLastDateList(gregorian: str, tzolkin: TzolkinDate) -> None:
    """Test `Tzolkin.getLastDateList`."""
    gregorian_date = datetime.datetime.strptime(
        gregorian, USED_DATEFMT
    ).date() - datetime.timedelta(days=1)
    to_test = Tzolkin.fromDateString(date_str=gregorian, fmt=USED_DATEFMT)
    tz_list = to_test.getLastDateList(start_date=gregorian_date)
    good_list = tzolkin2gregorian(
        tzolkin=tzolkin, start=gregorian_date, num_results=50, forward=False
    )
    assert len(tz_list) == 50  # nosec
    for idx in range(0, len(good_list)):
        assert tz_list[idx] == good_list[idx]
5,350,645
def release_(ctx, version, branch, master_branch, release_branch, changelog_base, force): """ Release a branch. Note that this differs from the create-release command: 1. Create a Github release with the version as its title. 2. Create a commit bumping the version of setup.py on top of the branch. 3. Generated and upload changelog of the head of the branch, relative to the latest release. 4. Update the master branch to point to the release commit. 4. Close any related issues with a comment specifying the release title. The version is calculated automatically according to the changelog. Note that the release tag will point to the above mentioned commit. The command is mainly intended to be executed automatically using CI systems (as described below), and implements certain heuristics in order to perform properly. Note, the release process will only take place if the following conditions hold: 1. The current build passes validation. (see validate-build) 2. The tip of the branch passes validation. (see validate-commit) 3. The release does not yet exist. If either of these conditions is not satisfied, the command will be silently ignored and complete successfully. This is useful so that your builds will not fail when running on commits that shouldn't be released. This command is idempotent, given that the tip of your branch hasn't changed between executions. You can safely run this command in parallel, this is important when running your CI process on multiple systems concurrently. """ ci_provider = ctx.obj.ci_provider gh = ctx.obj.github branch = branch or (ci_provider.branch if ci_provider else None) release_branch = release_branch or gh.default_branch_name sha = ci_provider.sha if ci_provider else branch if not force: try: ctx.invoke(ci.validate_build, release_branch=release_branch) ctx.invoke(validate_commit, sha=sha) except TerminationException as e: if isinstance(e.cause, exceptions.ReleaseValidationFailedException): log.sub() log.echo("Not releasing: {}".format(str(e))) return raise log.echo("Releasing branch '{}'".format(branch), add=True) changelog = _generate_changelog(gh=gh, sha=sha, base=changelog_base) next_version = version or changelog.next_version if not next_version: err = ShellException('None of the commits in the changelog references an issue ' 'labeled with a release label. Cannot determine what the ' 'version number should be.') err.cause = 'You probably only committed internal issues since the last release, ' \ 'or forgot to reference the issue.' err.possible_solutions = [ 'Amend the message of one of the commits to reference a release issue', 'Push another commit that references a release issue', 'Use --version to specify a version manually' ] raise err release = _create_release(ctx=ctx, changelog=changelog, branch=branch, master_branch=master_branch, version=next_version, sha=sha) log.echo('Closing issues', add=True) for issue in changelog.all_issues: ctx.invoke(close_issue, number=issue.impl.number, release=release.title) log.sub() log.sub() log.echo('Successfully released: {}'.format(release.url)) return release
5,350,646
async def test_full_flow_implementation(hass): """Test registering an implementation and finishing flow works.""" gen_authorize_url = AsyncMock(return_value="https://example.com") convert_code = AsyncMock(return_value={"access_token": "yoo"}) config_flow.register_flow_implementation( hass, "test", "Test", gen_authorize_url, convert_code ) config_flow.register_flow_implementation( hass, "test-other", "Test Other", None, None ) flow = config_flow.NestFlowHandler() flow.hass = hass result = await flow.async_step_init() assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "init" result = await flow.async_step_init({"flow_impl": "test"}) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM assert result["step_id"] == "link" assert result["description_placeholders"] == {"url": "https://example.com"} result = await flow.async_step_link({"code": "123ABC"}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY assert result["data"]["tokens"] == {"access_token": "yoo"} assert result["data"]["impl_domain"] == "test" assert result["title"] == "Nest (via Test)"
5,350,647
def tmp_bind(
    logger: TLLogger, **tmp_values: Any
) -> Generator[TLLogger, None, None]:
    """
    Bind *tmp_values* to *logger* & memorize current state. Rewind afterwards.
    """
    saved = as_immutable(logger)._context
    try:
        yield logger.bind(**tmp_values)  # type: ignore
    finally:
        logger._context.clear()
        logger._context.update(saved)
5,350,648
def rename(tax_idx, tax_queries, outdir, column=1, header=False): """ Renaming queries with new taxonomic classifications. All entries that cannot be re-named will be excluded in the output. """ # converting tax_idx to a simple index idx = {} # {old_tax : new_tax} for x in tax_idx: for k,v in x.items(): idx[k] = v[0] # renaming queries if not os.path.isdir(outdir): os.makedirs(outdir) outfile = os.path.join(outdir, 'queries_renamed.tsv') status = {'renamed' : 0, 'excluded' : 0} with _open(tax_queries) as inF, open(outfile, 'w') as outF: for i,line in enumerate(inF): try: line = line.decode('utf-8') except AttributeError: pass line = line.rstrip().split('\t') if header is True and i == 0: pass else: try: line[column-1] = idx[line[column-1]] status['renamed'] += 1 except KeyError: status['excluded'] += 1 continue if line[column-1].lower() == 'unclassified': status['renamed'] -= 1 status['excluded'] += 1 continue outF.write('\t'.join(line) + '\n') # status logging.info('File written: {}'.format(outfile)) logging.info(' No. of queries renamed: {}'.format(status['renamed'])) logging.info(' No. of queries excluded: {}'.format(status['excluded']))
5,350,649
def TryRevision(rev, profile, args): """Downloads revision |rev|, unzips it, and opens it for the user to test. |profile| is the profile to use.""" # Do this in a temp dir so we don't collide with user files. cwd = os.getcwd() tempdir = tempfile.mkdtemp(prefix='bisect_tmp') os.chdir(tempdir) # Download the file. download_url = BUILD_BASE_URL + (BUILD_ARCHIVE_URL % rev) + BUILD_ZIP_NAME def _Reporthook(blocknum, blocksize, totalsize): size = blocknum * blocksize if totalsize == -1: # Total size not known. progress = "Received %d bytes" % size else: size = min(totalsize, size) progress = "Received %d of %d bytes, %.2f%%" % ( size, totalsize, 100.0 * size / totalsize) # Send a \r to let all progress messages use just one line of output. sys.stdout.write("\r" + progress) sys.stdout.flush() try: print 'Fetching ' + download_url urllib.urlretrieve(download_url, BUILD_ZIP_NAME, _Reporthook) print except Exception, e: print('Could not retrieve the download. Sorry.') sys.exit(-1) # Unzip the file. print 'Unzipping ...' UnzipFilenameToDir(BUILD_ZIP_NAME, os.curdir) # Tell the system to open the app. args = ['--user-data-dir=%s' % profile] + args flags = ' '.join(map(pipes.quote, args)) exe = os.path.join(os.getcwd(), BUILD_DIR_NAME, BUILD_EXE_NAME) cmd = '%s %s' % (exe, flags) print 'Running %s' % cmd os.system(cmd) os.chdir(cwd) print 'Cleaning temp dir ...' try: shutil.rmtree(tempdir, True) except Exception, e: pass
5,350,650
def get_namedtuple_from_paramnames(owner, parnames):
    """
    Returns the namedtuple classname for parameter names
    :param owner: Owner of the parameters, usually the spotpy setup
    :param parnames: Sequence of parameter names
    :return: Class
    """
    # Get name of owner class
    typename = type(owner).__name__
    parnames = ["p" + x if x.isdigit() else x for x in list(parnames)]
    return namedtuple('Par_' + typename,  # Type name created from the setup name
                      parnames)
5,350,651
def readNotificationGap(alarmName):
    """ Returns the notificationGap of the specified alarm from the database """
    cur = conn.cursor()
    cur.execute('Select notificationGap FROM Alarms WHERE name is "%s"' % alarmName)
    gapNotification = int(cur.fetchone()[0])
    conn.commit()
    return gapNotification
5,350,652
def vertical_line(p1, p2, p3):
    """
    The line through point p3 that is perpendicular to the line through p1 and p2.
    Perpendicular lines have slopes that are negative reciprocals of each other.
    :param p1: [x,y]
    :param p2: [x,y]
    :param p3: [x,y]
    :return: coefficients of the new equation [na, nb, nc]
    """
    line = fit_line(p1, p2)
    a, b, c = line  # ax + by + c = 0; b is usually -1
    # Compute the coefficients na, nb, nc of the perpendicular line
    if a == 0.:  # original line is y = c, so the new line is x = -nc
        na = 1.
        nb = 0.
    elif b == 0.:  # original line is x = -c, so the new line is y = nc
        na = 0.
        nb = -1.
    else:
        # slopes are negative reciprocals of each other: a * na = -1
        na = -1. / a
        nb = -1.
    # Solve for the constant term from na*x + nb*y + nc = 0 at point p3
    nc = -(na * p3[0] + nb * p3[1])
    return [na, nb, nc]
5,350,653
def _get_value(key, entry):
    """
    :param key:
    :param entry:
    :return:
    """
    if key in entry:
        if entry[key] and str(entry[key]).lower() == "true":
            return True
        elif entry[key] and str(entry[key]).lower() == "false":
            return False
        return entry[key]
    return None
5,350,654
def calculate_bounded_area(x0, y0, x1, y1): """ Calculate the area bounded by two potentially-nonmonotonic 2D data sets This function is written to calculate the area between two arbitrary piecewise-linear curves. The method was inspired by the arbitrary polygon filling routines in vector software programs when the polygon self-intersects. Created: 2015 April 29, msswan """ # We start by taking the start of the first data set (pts0) and loop over # each segment (starting with the closest) and check to see if the # second data (pts1) set intersects. If there is an intersection, it joins # all the points together to make a polygon (reversing pts1 so that the # polygon integration calculation goes around in a single direction) and # calculates the area from that. Now it removes the points that it used to # create the polygon and adds the intersection point to pts0 (which is the # new starting point) and starts the loop again. # Turn the data into lists of tuples (x,y) coordinates pts0 = list(zip(x0, y0)) pts1 = list(zip(x1, y1)) area = 0.0 while len(pts0) + len(pts1) > 0: shouldbreak = False for idx in range(0, len(pts0)-1): for jdx in range(0, len(pts1)-1): doesintersect, int_pt = line_intersect(pts0[idx], pts0[idx+1], pts1[jdx], pts1[jdx+1]) if not doesintersect: continue polygon = list(reversed(pts1[:jdx])) + pts0[:idx] + [int_pt,] area += get_area(polygon) # Trim the processed points off of the datasets pts0 = [int_pt,] + pts0[idx+1:] pts1 = pts1[jdx+1:] # Exit out of both for-loops shouldbreak = True break if shouldbreak: break else: # Make a polygon out of whatever points remain polygon = list(reversed(pts1)) + pts0 area += get_area(polygon) # exit the while loop break return area
5,350,655
def test_check_loop_sync(caplog):
    """Test check_loop does nothing when called from thread."""
    hasync.check_loop()
    assert "Detected I/O inside the event loop" not in caplog.text
5,350,656
def login():
    """ Login to APIC-EM northbound APIs in shell.

    Returns:
        Client (NbClientManager) which is already logged in.
    """
    try:
        client = NbClientManager(
            server=APIC,
            username=APIC_USER,
            password=APIC_PASSWORD,
            connect=True)
        return client
    except requests.exceptions.HTTPError as exc_info:
        if exc_info.response.status_code == 401:
            print('Authentication Failed. Please provide valid username/password.')
        else:
            print('HTTP Status Code {code}. Reason: {reason}'.format(
                code=exc_info.response.status_code,
                reason=exc_info.response.reason))
        exit(1)
    except requests.exceptions.ConnectionError:
        print('Connection aborted. Please check if the host {host} is available.'.format(host=APIC))
        exit(1)
5,350,657
def transfocator_compute_configuration(photon_energy_ev, s_target,
        symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],
        nlenses_max=[15,3,1], nlenses_radii=[500e-4,1000e-4,1500e-4], lens_diameter=0.05,
        sigmaz=6.46e-4, alpha=0.55,
        tf_p=5960, tf_q=3800, verbose=1):
    """
    Computes the optimum transfocator configuration for a given photon energy and target image size.
    All length units are cm.
    :param photon_energy_ev: the photon energy in eV
    :param s_target: the target image size in cm.
    :param symbol: the chemical symbol of the lens material of each type. Default: symbol=["Be","Be","Be"]
    :param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
    :param nlenses_max: the maximum allowed number of lenses for each type of lens. Default: nlenses_max = [15,3,1]
    :param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
    :param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
        consider the smaller one. Default: lens_diameter=0.05
    :param sigmaz: the sigma (standard deviation) of the source in cm
    :param alpha: an adjustable parameter in [0,1] (see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
    :param tf_p: the distance source-transfocator in cm
    :param tf_q: the distance transfocator-image in cm
    :param verbose: set to 1 for verbose text output
    :return: a list with the number of lenses of each type.
    """
    if s_target < 2.35*sigmaz*tf_q/tf_p:
        print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
        print("Maximum demagnification is: %f"%(tf_p/tf_q))
        print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
        print("Error: redefine size")
        return None

    deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
              for i in range(len(symbol))]

    focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \
                                                        lens_diameter=lens_diameter, method=2)

    focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p)
    div_q_target = alpha * lens_diameter / focal_q_target

    # corrections for extreme cases
    source_demagnified = 2.35*sigmaz*focal_q_target/tf_p
    if source_demagnified > lens_diameter:
        source_demagnified = lens_diameter

    s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2)

    nlenses_target = _transfocator_guess_configuration(focal_f_target, deltas=deltas, \
                                                       nlenses_max=nlenses_max, radii=nlenses_radii)
    if verbose:
        print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target))
        print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target))
        print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4))
        print("transfocator_compute_configuration: nlenses_target: ",nlenses_target)

    return nlenses_target
5,350,658
def update_contour(): """ Finds contours in the current color image and uses them to update contour_center and contour_area """ global contour_center global contour_area image = rc.camera.get_color_image() if image is None: contour_center = None contour_area = 0 else: # Find all of the orange contours contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1]) # Select the largest contour contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA) if contour is not None: # Calculate contour information contour_center = rc_utils.get_contour_center(contour) contour_area = rc_utils.get_contour_area(contour) # Draw contour onto the image rc_utils.draw_contour(image, contour) rc_utils.draw_circle(image, contour_center) else: contour_center = None contour_area = 0 # Display the image to the screen rc.display.show_color_image(image)
5,350,659
def flickrapi_fn(fn_name,
                 fn_args,    # format: ()
                 fn_kwargs,  # format: dict()
                 attempts=3,
                 waittime=5,
                 randtime=False,
                 caughtcode='000'):
    """ flickrapi_fn

        Runs the flickrapi function fn_name, handing over **fn_kwargs.
        It retries up to `attempts` times, waiting `waittime` (randomized if
        `randtime`) between tries via @retry.
        Checks the result with is_good and provides feedback accordingly.
        Captures flickrapi or BaseException error situations.
        caughtcode is used to report on exception errors.

        Returns:
            fn_success = True/False
            fn_result  = Actual flickrapi function call result
            fn_errcode = error reported by flickrapi exception
    """

    @rate_limited.retry(attempts=attempts,
                        waittime=waittime,
                        randtime=randtime)
    def retry_flickrapi_fn(kwargs):
        """ retry_flickrapi_fn

            Decorator to retry calling a function
        """
        return fn_name(**kwargs)

    logging.info('fn:[%s] attempts:[%s] waittime:[%s] randtime:[%s]',
                 fn_name.__name__, attempts, waittime, randtime)
    if logging.getLogger().getEffectiveLevel() <= logging.INFO:
        for i, arg in enumerate(fn_args):
            logging.info('fn:[%s] arg[%s]={%s}', fn_name.__name__, i, arg)
        for name, value in fn_kwargs.items():
            logging.info('fn:[%s] kwarg[%s]=[%s]',
                         fn_name.__name__, name, value)

    fn_success = False
    fn_result = None
    fn_errcode = 0
    try:
        fn_result = retry_flickrapi_fn(fn_kwargs)
    except flickrapi.exceptions.FlickrError as flickr_ex:
        fn_errcode = flickr_ex.code
        NPR.niceerror(caught=True,
                      caughtprefix='+++Api',
                      caughtcode=caughtcode,
                      caughtmsg='Flickrapi exception on [{!s}]'
                      .format(fn_name.__name__),
                      exceptuse=True,
                      exceptcode=flickr_ex.code,
                      exceptmsg=flickr_ex,
                      useniceprint=True,
                      exceptsysinfo=True)
    except (IOError, httplib.HTTPException):
        NPR.niceerror(caught=True,
                      caughtprefix='+++Api',
                      caughtcode=caughtcode,
                      caughtmsg='Caught IO/HTTP Error on [{!s}]'
                      .format(fn_name.__name__))
    except Exception as exc:
        NPR.niceerror(caught=True,
                      caughtprefix='+++Api',
                      caughtcode=caughtcode,
                      caughtmsg='Exception on [{!s}]'.format(fn_name.__name__),
                      exceptuse=True,
                      exceptmsg=exc,
                      useniceprint=True,
                      exceptsysinfo=True)
    except BaseException:
        NPR.niceerror(caught=True,
                      caughtprefix='+++Api',
                      caughtcode=caughtcode,
                      caughtmsg='BaseException on [{!s}]'
                      .format(fn_name.__name__),
                      exceptsysinfo=True)
    finally:
        pass

    if is_good(fn_result):
        fn_success = True

        logging.info('fn:[%s] Output for fn_result:', fn_name.__name__)
        logging.info(xml.etree.ElementTree.tostring(
            fn_result,
            encoding='utf-8',
            method='xml'))
    else:
        logging.error('fn:[%s] is_good(fn_result):[%s]',
                      fn_name.__name__,
                      'None' if fn_result is None else is_good(fn_result))
        fn_result = None

    logging.info('fn:[%s] success:[%s] result:[%s] errcode:[%s]',
                 fn_name.__name__, fn_success, fn_result, fn_errcode)

    return fn_success, fn_result, fn_errcode
5,350,660
def test_network_cabling_mutually_exclusive_ips_and_file(): """Test that the `canu report network cabling` command only accepts IPs from command line OR file input, not both.""" with runner.isolated_filesystem(): result = runner.invoke( cli, [ "--cache", cache_minutes, "report", "network", "cabling", "--username", username, "--password", password, "--ips", ips, "--ips-file", "file.txt", ], ) assert result.exit_code == 2 assert ( "Error: Mutually exclusive options from 'Network cabling IPv4 input sources'" in str(result.output) )
5,350,661
def send_task_event(state, task, send_event_func, event): """ Send a task event delegating to 'send_event_func' which will send events to RabbitMQ or use the workflow context logger in local context :param state: the task state (valid: ['sending', 'started', 'rescheduled', 'succeeded', 'failed']) :param task: a WorkflowTask instance to send the event for :param send_event_func: function for actually sending the event somewhere :param event: a dict with either a result field or an exception fields follows celery event structure but used by local tasks as well """ if _filter_task(task, state): return if state in (TASK_FAILED, TASK_RESCHEDULED, TASK_SUCCEEDED) \ and event is None: raise RuntimeError('Event for task {0} is None'.format(task.name)) message = format_event_message( task.name, task.task_type, state, event.get('result'), event.get('exception'), task.current_retries, task.total_retries, postfix=' (dry run)' if task.workflow_context.dry_run else None ) event_type = get_event_type(state) additional_context = { 'task_current_retries': task.current_retries, 'task_total_retries': task.total_retries } if state in (TASK_FAILED, TASK_RESCHEDULED): additional_context['task_error_causes'] = event.get('causes') send_event_func(task=task, event_type=event_type, message=message, additional_context=additional_context)
5,350,662
def load_data(CWD): """ loads the data from a parquet file specified below input: CWD = current working directory path output: df_raw = raw data from parquet file as pandas dataframe """ folderpath_processed_data = CWD + '/data_sample.parquet' df_raw = pd.read_parquet(folderpath_processed_data) return df_raw
5,350,663
def run_command(cmd): """Run command, return output as string.""" output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0] return output.decode("ascii")
5,350,664
def http_body():
    """
    Returns random binary body data.
    """
    # Note: hypothesis removed the `average_size` argument from collection
    # strategies; only min_size/max_size are supported now.
    return strategies.binary(min_size=0, max_size=1500)
5,350,665
def run(data_s: str) -> tuple[int, int]: """Solve the puzzles.""" results = [check(line) for line in data_s.splitlines()] part1 = sum(result.error_score for result in results) part2 = int(median(result.completion_score for result in results if result.ok)) return part1, part2
5,350,666
def _getRotatingFileHandler(filename, mode='a', maxBytes=1000000, backupCount=0,
                            encoding='utf-8', uid=None, gid=None):
    """Get a :class:`logging.RotatingFileHandler` with a logfile which is
    readable+writable only by the given **uid** and **gid**.

    :param str filename: The full path to the log file.
    :param str mode: The mode to open **filename** with. (default: ``'a'``)
    :param int maxBytes: Rotate logfiles after they have grown to this size in
        bytes.
    :param int backupCount: The number of logfiles to keep in rotation.
    :param str encoding: The encoding for the logfile.
    :param int uid: The owner UID to set on the logfile.
    :param int gid: The GID to set on the logfile.
    :rtype: :class:`logging.handlers.RotatingFileHandler`
    :returns: A logfile handler which will rotate files and chown/chmod newly
        created files.
    """
    # Default to the current process owner's uid and gid:
    uid = os.getuid() if not uid else uid
    gid = os.getgid() if not gid else gid

    if not os.path.exists(filename):
        open(filename, 'a').close()
    os.chown(filename, uid, gid)
    # 0o600: readable and writable by the owner only.  (os.ST_WRITE and
    # os.ST_APPEND are statvfs flags, not permission bits, so they are not
    # valid arguments for chmod.)
    os.chmod(filename, 0o600)

    fileHandler = partial(logging.handlers.RotatingFileHandler,
                          filename,
                          mode,
                          maxBytes=maxBytes,
                          backupCount=backupCount,
                          encoding=encoding)
    return fileHandler
5,350,667
def _inject(*args, **kwargs): """Inject variables into the arguments of a function or method. This is almost identical to decorating with functools.partial, except we also propagate the wrapped function's __name__. """ def injector(f): assert callable(f) @functools.wraps(f) def wrapper(*w_args, **w_kwargs): return functools.partial(f, *args, **kwargs)(*w_args, **w_kwargs) wrapper.args = args wrapper.kwargs = kwargs wrapper.function = f return wrapper return injector
5,350,668
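# Usage sketch for _inject: pre-binding arguments while keeping the wrapped
# function's __name__ and exposing .args/.kwargs/.function on the wrapper.
# The connection-string example below is purely illustrative.
@_inject("postgres", port=5432)
def connect(driver, host, port=None):
    return "{}://{}:{}".format(driver, host, port)

print(connect("db.example.com"))      # -> "postgres://db.example.com:5432"
print(connect.__name__)               # -> "connect"
print(connect.args, connect.kwargs)   # -> ('postgres',) {'port': 5432}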
def frame_drop_correctors_ready():
    """
    Checks to see if the frame drop correctors 'seq_and_image_corr' topics
    are all being published. There should be a corrector topic for each
    camera.
    """
    camera_assignment = get_camera_assignment()
    number_of_cameras = len(camera_assignment)
    number_of_correctors = get_number_of_corrector_topics()
    return number_of_cameras == number_of_correctors
5,350,669
def loads(content: str) -> List[Dict[str, Any]]:
    """
    Load the given YAML string
    """
    # yaml.load_all yields no documents for an empty file, so this is simply
    # an empty list in that case, matching the declared return type
    template = list(yaml.load_all(content, Loader=SafeLineLoader))

    return template
5,350,670
def celery_worker(level="debug"): """Run the Celery process.""" cmd = 'celery worker -A framework.tasks -l {0}'.format(level) run(bin_prefix(cmd))
5,350,671
def get_ndim_horizontal_coords(easting, northing):
    """
    Return the number of dimensions of the horizontal coordinates arrays

    Also check that the two horizontal coordinate arrays have the same number
    of dimensions.

    Parameters
    ----------
    easting : nd-array
        Array for the easting coordinates
    northing : nd-array
        Array for the northing coordinates

    Returns
    -------
    ndim : int
        Number of dimensions of the ``easting`` and ``northing`` arrays.
    """
    ndim = np.ndim(easting)
    if ndim != np.ndim(northing):
        raise ValueError(
            "Horizontal coordinates dimensions mismatch. "
            + f"The easting coordinate array has {ndim} dimensions "
            + f"while the northing has {np.ndim(northing)}."
        )
    return ndim
5,350,672
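# Example: matching 2-D coordinate grids pass; mismatched dimensions raise.
import numpy as np

easting, northing = np.meshgrid(np.linspace(0, 10, 5), np.linspace(-5, 5, 4))
print(get_ndim_horizontal_coords(easting, northing))  # -> 2

try:
    get_ndim_horizontal_coords(easting.ravel(), northing)
except ValueError as error:
    print(error)  # reports the 1 vs 2 dimension mismatch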
async def info():
    """
    API information endpoint

    Returns:
        [json] -- [description] app version, environment running in (dev/prd),
        Doc/Redoc link, License information, and support information
    """
    if RELEASE_ENV.lower() == "dev":
        main_url = "http://localhost:5000"
    else:
        main_url = HOST_DOMAIN

    openapi_url = f"{main_url}/docs"
    redoc_url = f"{main_url}/redoc"
    result = {
        "App Version": APP_VERSION,
        "Environment": RELEASE_ENV,
        "Docs": {"OpenAPI": openapi_url, "ReDoc": redoc_url},
        "License": {"Type": LICENSE_TYPE, "License Link": LICENSE_LINK},
        "Application_Information": {"Owner": OWNER, "Support Site": WEBSITE},
    }
    return result
5,350,673
async def info(request: FasttextRequest): """ Returns info about the supervised model TODO - Add authentication :param request: :return: """ app: FasttextServer = request.app model: SupervisedModel = app.get_supervised_model() model_info = { "dimensions": model.get_dimension(), "isQuantised": model.is_quantized() } return json(request, model_info, 200)
5,350,674
def main(): """Start of the program.""" args = parse_arguments(sys.argv[1:]) settings = get_settings(args) args.func(settings)
5,350,675
def hard_example_mining(dist_mat, labels, return_inds=False): """For each anchor, find the hardest positive and negative sample. Args: dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N] labels: pytorch LongTensor, with shape [N] return_inds: whether to return the indices. Save time if `False`(?) Returns: dist_ap: pytorch Variable, distance(anchor, positive); shape [N] dist_an: pytorch Variable, distance(anchor, negative); shape [N] p_inds: pytorch LongTensor, with shape [N]; indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1 n_inds: pytorch LongTensor, with shape [N]; indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1 NOTE: Only consider the case in which all labels have same num of samples, thus we can cope with all anchors in parallel. """ assert len(dist_mat.size()) == 2 assert dist_mat.size(0) == dist_mat.size(1) N = dist_mat.size(0) # shape [N, N] is_pos = labels.expand(N, N).eq(labels.expand(N, N).t()) is_neg = labels.expand(N, N).ne(labels.expand(N, N).t()) # `dist_ap` means distance(anchor, positive) # both `dist_ap` and `relative_p_inds` with shape [N, 1] dist_ap, relative_p_inds = torch.max( dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True) # `dist_an` means distance(anchor, negative) # both `dist_an` and `relative_n_inds` with shape [N, 1] dist_an, relative_n_inds = torch.min( dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True) # shape [N] dist_ap = dist_ap.squeeze(1) dist_an = dist_an.squeeze(1) if return_inds: # shape [N, N] ind = (labels.new().resize_as_(labels) .copy_(torch.arange(0, N).long()) .unsqueeze(0).expand(N, N)) # shape [N, 1] p_inds = torch.gather( ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data) n_inds = torch.gather( ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data) # shape [N] p_inds = p_inds.squeeze(1) n_inds = n_inds.squeeze(1) return dist_ap, dist_an, p_inds, n_inds return dist_ap, dist_an
5,350,676
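# Minimal sketch of hard_example_mining on a toy batch (2 identities with
# 2 samples each), using Euclidean distances between random embeddings.
import torch

torch.manual_seed(0)
feats = torch.randn(4, 8)
labels = torch.tensor([0, 0, 1, 1])
dist_mat = torch.cdist(feats, feats)   # pairwise distances, shape [4, 4]

dist_ap, dist_an = hard_example_mining(dist_mat, labels)
print(dist_ap.shape, dist_an.shape)    # torch.Size([4]) torch.Size([4])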
def getSerialPorts():
    """ Lists serial port names

        :raises EnvironmentError:
            On unsupported or unknown platforms
        :returns:
            A list of the serial ports available on the system
    """
    print("Getting all available serial ports...")
    print("This may take a sec...")
    if sys.platform.startswith('win'):
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        ports = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        ports = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')
    result = []
    for port in ports:
        try:
            s = serial.Serial(port)
            s.close()
            result.append(port)
            print(port)
        except (OSError, serial.SerialException):
            pass
    input("Press Enter to Continue..")
    return result
5,350,677
def test_create_project_environment_success(temporary_folder, temporary_home, fake_popen): """Test full cycle for creating an environment from conda.""" fake_popen.set_cmd_attrs('virtualenv --version', returncode=0) fake_popen.set_cmd_attrs('git --version', returncode=0) # make sure we write to the correct directory assert pathlib.Path.home() == temporary_folder arguments = { 'proj_name': 'venv_project', 'proj_path': pathlib.Path(temporary_folder), 'python_version': '0.0', 'aiida_version': '0.0.0', 'packages': ['aiida-vasp[extras1]', 'pymatgen==2019.3.13', 'aiidateam/aiida-ase:devel[extras1]'] } creator = CreateEnvVirtualenv(**arguments) fake_popen.set_cmd_attrs('virtualenv', returncode=0) creator.create_aiida_project_environment() base_folder = str((creator.env_folder / creator.proj_name).absolute()) src_folder = creator.src_folder.absolute() expected_cmd_order = [ "virtualenv --version", "git --version", # !!! There are 2 empty spaces expected after virtualenv due to the # !!! empty env_arguments list "virtualenv --python=python0.0 {}".format(base_folder), ("pip install --pre aiida-core==0.0.0 aiida-vasp[extras1] " "pymatgen==2019.3.13"), ("git clone --single-branch --branch devel https://github.com/" "aiidateam/aiida-ase {}" .format(str(src_folder / "aiida-ase"))), ("pip install --editable {}" .format(str(src_folder / "aiida-ase[extras1]"))) ] # compare expected cmd order with actual cmd order send to Popen actual_cmd_order = [_ for (_,) in fake_popen.args] assert actual_cmd_order == expected_cmd_order # test the written project specs path_to_config = (pathlib.Path.home() / constants.CONFIG_FOLDER / constants.PROJECTS_FILE) assert path_to_config.exists() is True loaded_specs = utils.load_project_spec() assert 'venv_project' in loaded_specs.keys() contents = loaded_specs['venv_project'] ppath = pathlib.Path(temporary_folder) srcpath = ppath / 'venv_project' / constants.DEFAULT_SRC_SUBFOLDER envpath = ppath / 'venv_project' / constants.DEFAULT_ENV_SUBFOLDER assert contents['project_path'] == str(ppath) assert contents['aiida'] == '0.0.0' assert contents['python'] == '0.0' assert contents['env_sub'] == str(envpath) assert contents['src_sub'] == str(srcpath) assert contents['manager'] == constants.MANAGER_NAME_VENV
5,350,678
def create_schema(force=False, checkfirst=True): """Create the tables and schema on the ModMon database. Parameters ---------- force : bool, optional Unless True ask for confirmation before taking potentially destructive action if checkfirst is False, by default False checkfirst : bool, optional If True don't recreate tables already present in the database, by default True """ if not checkfirst and not force: confirmed = ask_for_confirmation( "WARNING: This will delete all data currently in the database." ) if not confirmed: print("Aborting create.") return Base.metadata.create_all(ENGINE, checkfirst=checkfirst)
5,350,679
def dt_list_cached_mappings(): """ >>> old_state = test_config.setup() >>> import doctest >>> doctest.ELLIPSIS_MARKER = '-ignore-' >>> ListScript("crds.list --cached-mappings --full-path")() # doctest: +ELLIPSIS -ignore-/mappings/hst/hst.pmap -ignore-/mappings/hst/hst_0001.pmap -ignore-/mappings/hst/hst_0002.pmap -ignore- >>> doctest.ELLIPSIS_MARKER = '...' >>> test_config.cleanup(old_state) """
5,350,680
def trace(func): """Trace and capture provenance info inside a method /function.""" setup_logging() @wraps(func) def wrapper(*args, **kwargs): activity = func.__name__ activity_id = get_activity_id() # class_instance = args[0] class_instance = func class_instance.args = args class_instance.kwargs = kwargs # OSA specific # variables parsing global session_name, session_tag class_instance = parse_variables(class_instance) if class_instance.__name__ in REDUCTION_TASKS: session_tag = f"{activity}:{class_instance.ObservationRun}" session_name = f"{class_instance.ObservationRun}" else: session_tag = ( f"{activity}:{class_instance.PedestalRun}-{class_instance.CalibrationRun}" ) session_name = f"{class_instance.PedestalRun}-{class_instance.CalibrationRun}" # OSA specific # variables parsing # provenance capture before execution derivation_records = get_derivation_records(class_instance, activity) parameter_records = get_parameters_records(class_instance, activity, activity_id) usage_records = get_usage_records(class_instance, activity, activity_id) # activity execution start = datetime.datetime.now().isoformat() result = func(*args, **kwargs) end = datetime.datetime.now().isoformat() # no provenance logging if not log_is_active(class_instance, activity): return result # provenance logging only if activity ends properly session_id = log_session(class_instance, start) for log_record in derivation_records: log_prov_info(log_record) log_start_activity(activity, activity_id, session_id, start) for log_record in parameter_records: log_prov_info(log_record) for log_record in usage_records: log_prov_info(log_record) log_generation(class_instance, activity, activity_id) log_finish_activity(activity_id, end) return result return wrapper
5,350,681
def plot_images(images: list): """Plots a list of images, arranging them in a rectangular fashion""" num_plots = len(images) rows = round(math.sqrt(num_plots)) cols = math.ceil(math.sqrt(num_plots)) for k, img in enumerate(images): plt.subplot(rows, cols, k + 1) plt.axis('off') plt.imshow(img) plt.subplots_adjust(wspace=0, hspace=0) plt.show()
5,350,682
def _(path): """ Degenerate behavior for pathlib.Path objects. """ yield path
5,350,683
def broadcast_ms_tensors(network, ms_tensors, broadcast_ndim): """Broadcast TensorRT tensors to the specified dimension by pre-padding shape 1 dims""" broadcasted_ms_tensors = [None] * len(ms_tensors) for i, t in enumerate(ms_tensors): tensor = network.nodes[t] if len(tensor.shape) < broadcast_ndim: # append 1 size dims to front diff = broadcast_ndim - len(tensor.shape) shape = tuple([1] * diff + list(tensor.shape)) # TODO, check print ms_cell = _MsExpand0() out = ms_cell(tensor) op_key = network.add_ops(ms_cell) ms_tensor = network.add_node(out) network.add_pre(op_key, t) network.add_out(op_key, [ms_tensor]) # layer = network.add_shuffle(t) # layer.reshape_dims = shape # ms_tensor = layer.get_output(0) else: ms_tensor = t broadcasted_ms_tensors[i] = ms_tensor return broadcasted_ms_tensors
5,350,684
def get_rbf_gamma_based_in_median_heuristic(X: np.array, standardize: bool = False) -> float: """ Function implementing a heuristic to estimate the width of an RBF kernel (as defined in the Scikit-learn package) from data. :param X: array-like, shape = (n_samples, n_features), feature matrix :param standardize: boolean, indicating whether the data should be normalized (z-transformation) before the gamma is estimated. :return: scalar, gamma (of the sklearn RBF kernel) estimated from the data """ # Z-transform the data if requested if standardize: X = StandardScaler(copy=True).fit_transform(X) # Compute all pairwise euclidean distances D = euclidean_distances(X) # Get the median of the distances sigma = np.median(D) # Convert to sigma to gamma as defined in the sklearn package gamma = 1 / (2 * sigma**2) return gamma
5,350,685
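# Example: estimate gamma from data with the median heuristic and plug it into
# scikit-learn's RBF kernel.
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.randn(100, 5)

gamma = get_rbf_gamma_based_in_median_heuristic(X, standardize=True)
K = rbf_kernel(X, gamma=gamma)
print(gamma, K.shape)  # estimated kernel width, (100, 100) Gram matrix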
def winter_storm( snd: xarray.DataArray, thresh: str = "25 cm", freq: str = "AS-JUL" ) -> xarray.DataArray: """Days with snowfall over threshold. Number of days with snowfall accumulation greater or equal to threshold. Parameters ---------- snd : xarray.DataArray Surface snow depth. thresh : str Threshold on snowfall accumulation require to label an event a `winter storm`. freq : str Resampling frequency. Returns ------- xarray.DataArray Number of days per period identified as winter storms. Notes ----- Snowfall accumulation is estimated by the change in snow depth. """ thresh = convert_units_to(thresh, snd) # Compute daily accumulation acc = snd.diff(dim="time") # Winter storm condition out = threshold_count(acc, ">=", thresh, freq) out.attrs["units"] = to_agg_units(out, snd, "count") return out
5,350,686
def remove_dir_edge(g, x, y): """Removes the directed edge x --> y""" if g.has_edge(x, y): g.remove_edge(x, y)
5,350,687
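# Example with a networkx DiGraph; remove_dir_edge works with any graph object
# exposing has_edge/remove_edge.
import networkx as nx

g = nx.DiGraph([("a", "b"), ("b", "a")])
remove_dir_edge(g, "a", "b")   # removes a -> b only
remove_dir_edge(g, "a", "b")   # no-op: the edge is already gone
print(list(g.edges()))         # -> [('b', 'a')]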
def add_site(site, url_file): """ For an OOI site, assemble a curated list of all the instruments and data streams that are available for data explorations. This file will create a YAML file per site. Additional HITL work is required to further clean-up and check the list, and pruning streams and methods down to the core set of science sensors. The results of this work is used to create the m2m_urls.yml file used in this python toolbox. :param site: OOI 8 character site designation (e.g. CE01ISSM) :param url_file: data file to save the results in. :return: None, creates the data file. """ sp = ' ' # create an explicit two-space indent string for the YAML file # open the YAML file to store the results with open(url_file, 'w') as f: # create the header portion of the YAML, getting the array and site names from the API site = site.upper() for site_vocab in VOCAB: if site_vocab.get('refdes') == site: break f.write('---\n{site}:\n'.format(site=site)) f.write('{sp}array: {array}\n'.format(sp=sp*1, array=site_vocab['tocL1'])) f.write('{sp}name: {name}\n'.format(sp=sp*1, name=site_vocab['tocL2'])) f.write('{sp}assembly:\n'.format(sp=sp*1)) # create a list of nodes for this site nodes = list_nodes(site) # for each node, if it is one of interest as defined above, create an assembly entry for k, v in ASSEMBLY.items(): # find the nodes that correspond to this assembly assembly = k node = sorted(set(v) & set(nodes)) # skip if we don't have any nodes in this assembly if not node: continue # if we have nodes, create the assembly entry for node_vocab in VOCAB: if node_vocab.get('refdes') == site + '-' + node[0]: break # for each node, create the list of sensors for n in node: f.write('{sp}- type: {assembly}\n'.format(sp=sp * 2, assembly=assembly)) f.write('{sp}name: {name}\n'.format(sp=sp * 3, name=node_vocab['tocL3'])) # if we need to further distinguish, add the assembly code name for l, w in SUBASSEMBLY.items(): if n in w: f.write('{sp}subassembly: {name}\n'.format(sp=sp * 3, name=l)) f.write('{sp}instrument:\n'.format(sp=sp * 3)) sensors = list_sensors(site, n) sensors = filter_stream(sensors, SENSOR_EXCLUDES) # remove sensors of no interest if not sensors: continue for sensor in sensors: for sensor_vocab in VOCAB: if sensor_vocab.get('refdes') == site + '-' + n + '-' + sensor: break instrument = (sensor[3:8]).lower() f.write('{sp}- class: {instrument}\n'.format(sp=sp*4, instrument=instrument)) f.write('{sp}instrument_name: {name}\n'.format(sp=sp*5, name=sensor_vocab['instrument'])) f.write('{sp}instrument_model: {model}\n'.format(sp=sp*5, model=sensor_vocab['model'])) f.write('{sp}instrument_manufacturer: {manu}\n'.format(sp=sp*5, manu=sensor_vocab['manufacturer'])) f.write('{sp}mindepth: {mindepth}\n'.format(sp=sp*5, mindepth=sensor_vocab['mindepth'])) f.write('{sp}maxdepth: {maxdepth}\n'.format(sp=sp*5, maxdepth=sensor_vocab['maxdepth'])) f.write('{sp}node: {node}\n'.format(sp=sp*5, node=n)) f.write('{sp}sensor: {sensor}\n'.format(sp=sp*5, sensor=sensor)) f.write('{sp}stream:\n'.format(sp=sp*5)) methods = list_methods(site, n, sensor) if not methods: f.write('{sp}unknown: null\n'.format(sp=sp*6,)) continue for method in methods: if method in METHODS: streams = list_streams(site, n, sensor, method) streams = filter_stream(streams, STREAM_EXCLUDES) if len(streams) == 1: f.write('{sp}{method}: {streams}\n'.format(sp=sp*6, method=method, streams=streams[0])) else: f.write('{sp}{method}:\n'.format(sp=sp*6, method=method)) for stream in streams: f.write('{sp}- {stream}\n'.format(sp=sp*7, stream=stream))
5,350,688
def _check_whitelist_members(rule_members=None, policy_members=None): """Whitelist: Check that policy members ARE in rule members. If a policy member is NOT found in the rule members, add it to the violating members. Args: rule_members (list): IamPolicyMembers allowed in the rule. policy_members (list): IamPolicyMembers in the policy. Return: list: Policy members NOT found in the whitelist (rule members). """ violating_members = [] for policy_member in policy_members: # check if policy_member is found in rule_members if not any(r.matches(policy_member) for r in rule_members): violating_members.append(policy_member) return violating_members
5,350,689
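# Illustration with a hypothetical rule-member type; the real rule members
# (with their .matches() method) are IamPolicyMembers defined elsewhere in the
# codebase, so the glob-based stand-in below is only an assumption.
import fnmatch

class _GlobMember(object):
    def __init__(self, pattern):
        self.pattern = pattern

    def matches(self, member):
        return fnmatch.fnmatch(member, self.pattern)

rule_members = [_GlobMember("user:*@example.com")]
policy_members = ["user:alice@example.com", "user:bob@evil.org"]
print(_check_whitelist_members(rule_members, policy_members))
# -> ['user:bob@evil.org']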
def get_mac():
    """Return the MAC address of the PC's first NIC as 12 hex digits, without colons"""
    return '%012x' % uuid.getnode()
5,350,690
async def get_clusters(session, date):
    """
    Fetch the file clusters for the given date from the /file/clusters endpoint.

    :param session: HTTP client session used to perform the request
    :param date: date (string) identifying the clusters to retrieve
    :return: the response returned by ``get``
    """
    url = "%s/file/clusters" % BASE_URL
    params = {'date': date}

    return await get(session, url, params)
5,350,691
def extract_attributes_from_entity(json_object):
    """
        returns the attributes from a json representation
        Args:
            @param json_object: JSON representation
    """
    if 'attributes' in json_object:
        items = json_object['attributes']
        attributes = recursive_for_attribute_v2(items)
        return attributes
    else:
        return None
5,350,692
def get_config_with_api_token(tempdir, get_config, api_auth_token): """ Get a ``_Config`` object. :param TempDir tempdir: A temporary directory in which to create the Tahoe-LAFS node associated with the configuration. :param (bytes -> bytes -> _Config) get_config: A function which takes a node directory and a Foolscap "portnum" filename and returns the configuration object. :param bytes api_auth_token: The HTTP API authorization token to write to the node directory. """ FilePath(tempdir.join(b"tahoe", b"private")).makedirs() config = get_config(tempdir.join(b"tahoe"), b"tub.port") config.write_private_config(b"api_auth_token", api_auth_token) return config
5,350,693
def make_journal_title():
    """
    My journal is weekly. There's a config option 'journal_day' that lets me
    set the day of the week that my journal is based on. So, if I don't pass
    in a specific title, it will just create a new journal titled
    'Journal-date-of-next-journal-day.md'.
    """
    #TODO: Make the generated journal title a configurable pattern
    daymap = {
        'monday': 0,
        'tuesday': 1,
        'wednesday': 2,
        'thursday': 3,
        'friday': 4,
        'saturday': 5,
        'sunday': 6
    }
    today = datetime.date.today()
    journal_day = today + datetime.timedelta(
        (daymap[settings.JOURNAL_DAY.lower()] - today.weekday()) % 7
    )
    return 'Journal {0}'.format(journal_day)
5,350,694
def setrange(y1, y2, container=None): """Changes the range of the current container""" if container is None: _checkContainer() container = current.container container.setRange(y1, y2)
5,350,695
def calc_base_matrix_1qutrit_y_01() -> np.ndarray: """Return the base matrix corresponding to the y-axis w.r.t. levels 0 and 1.""" l = [[0, -1j, 0], [1j, 0, 0], [0, 0, 0]] mat = np.array(l, dtype=np.complex128) return mat
5,350,696
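# Sanity check: the 0-1 block is the Pauli-Y matrix and the result is Hermitian.
import numpy as np

mat = calc_base_matrix_1qutrit_y_01()
pauli_y = np.array([[0, -1j], [1j, 0]])
assert np.allclose(mat[:2, :2], pauli_y)
assert np.allclose(mat, mat.conj().T)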
def get_feature_names_small(ionnumber): """ feature names for the fixed peptide length feature vectors """ names = [] names += ["pmz", "peplen"] for c in ["bas", "heli", "hydro", "pI"]: names.append("sum_" + c) for c in ["mz", "bas", "heli", "hydro", "pI"]: names.append("mean_" + c) names.append("mz_ion") names.append("mz_ion_other") names.append("mean_mz_ion") names.append("mean_mz_ion_other") for c in ["bas", "heli", "hydro", "pI"]: names.append("{}_ion".format(c)) names.append("{}_ion_other".format(c)) names.append("endK") names.append("endR") names.append("nextP") names.append("nextK") names.append("nextR") for c in ["bas", "heli", "hydro", "pI", "mz"]: for pos in ["i", "i-1", "i+1", "i+2"]: names.append("loc_" + pos + "_" + c) names.append("charge") for i in range(ionnumber): for c in ["bas", "heli", "hydro", "pI", "mz"]: names.append("P_%i_%s"%(i, c)) names.append("P_%i_P"%i) names.append("P_%i_K"%i) names.append("P_%i_R"%i) return names
5,350,697
def vectorproduct(a,b): """ Return vector cross product of input vectors a and b """ a1, a2, a3 = a b1, b2, b3 = b return [a2*b3 - a3*b2, a3*b1 - a1*b3, a1*b2 - a2*b1]
5,350,698
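# Cross-check of vectorproduct against numpy.cross.
import numpy as np

a, b = [1, 2, 3], [4, 5, 6]
print(vectorproduct(a, b))  # -> [-3, 6, -3]
print(np.cross(a, b))       # -> [-3  6 -3]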
def test_accept_friend_request_makes_users_follow_each_other(actor, requester): """ Friends should follow each other when a friendship is initiated. """ send_friend_request(actor=requester, to_user=actor) accept_friend_request(actor, requester) assert actor.following.is_connected(requester) assert requester.following.is_connected(actor) assert actor.followed_by.is_connected(requester) assert requester.followed_by.is_connected(actor)
5,350,699