content: string (lengths 22 to 815k) | id: int64 (0 to 4.91M)
def CreateMnemonicsC(mnemonicsIds):
    """ Create the opcodes arrays for C header files. """
    opsEnum = "typedef enum {\n\tI_UNDEFINED = 0, "
    pos = 0
    l2 = sorted(mnemonicsIds.keys())
    for i in l2:
        s = "I_%s = %d" % (i.replace(" ", "_").replace(",", ""), mnemonicsIds[i])
        if i != l2[-1]:
            s += ","
        pos += len(s)
        if pos >= 70:
            s += "\n\t"
            pos = 0
        elif i != l2[-1]:
            s += " "
        opsEnum += s
    opsEnum += "\n} _InstructionType;"
    # Mnemonics are sorted by insertion order. (Pseudo mnemonics depend on this!)
    # NOTE: EXTRA BACKSLASHES FOR RE.SUB !!!
    s = "const unsigned char _MNEMONICS[] =\n\"\\\\x09\" \"UNDEFINED\\\\0\" "
    l = list(zip(mnemonicsIds.keys(), mnemonicsIds.values()))
    l = sorted(l, key=functools.cmp_to_key(lambda x, y: x[1] - y[1]))
    for i in l:
        s += "\"\\\\x%02x\" \"%s\\\\0\" " % (len(i[0]), i[0])
        if len(s) - s.rfind("\n") >= 76:
            s += "\\\\\n"
    s = s[:-1] + ";"  # Ignore last space.
    # Return enum & mnemonics.
    return (opsEnum, s)
5,351,300
def create_windows(c_main, origin, J=None, I=None, depth=None, width=None): """ Create windows based on contour and windowing parameters. The first window (at arc length = 0) is placed at the spline origin. Note: to define the windows, this function uses pseudo-radial and pseudo-angular coordinates. The pseudo-radial coordinate is based on the distance transform of the rasterized version of the continuous spline that defines the contour of the cell. The pseudo-angular coordinate for layer j is based on the distance transform of the discrete contour of layer j. So there is a bit of an inconsistency between continuous and discrete contours. Parameters ---------- c_main: 2d array A rasterized version of the contour, as obtained by spline_to_param_image. origin: tuple (y, x) coordinates of the origin of the curve. J: int Number of window layers. I: list of int Vector of dimension J specifying the number of windows per layer. depth: int Desired depth of the windows. width: int Desired width of the windows. Returns ------- w: 3d list w[i][j][0] and w[i][j][1] are 1d arrays representing lists of x,y indices of pixels belonging to window in i'th layer in j'th window J: int number of layers (calculated if not provided as input) I: list of int number of windows per layer (calculated if not provided as input) """ origin = [origin[1], origin[0]] # Compute the distance transform of the main contour D_main = distance_transform_edt(-1 == c_main) # Compute the mask corresponding to the main contour mask_main = binary_fill_holes( -1 < c_main ) # Maybe not necessary? Can't we just use the segmented mask here? # Divide the radial coordinate into J layers with specified depth Dmax = np.amax(D_main * mask_main) if J is None: J = int(math.ceil(Dmax / depth)) b = np.linspace( 0, Dmax, J + 1 ) # Boundaries of the layers in terms of distances to the main contour if I is None: compute_num_win = True I = [] else: compute_num_win = False w = [] for j in range(J): w.append([]) # The mask containing the interior of the cell starting from # the j-th layer mask = (b[j] <= D_main) * mask_main # Extract the contour of the mask # We must fix certain frames where multiple contours are returned. # So we choose the longest contour. Some pixels may be lost in the process, # i.e., the windows may not cover the entire cell. clist = find_contours(mask, 0, fully_connected="high") cvec = np.asarray( clist[np.argmax([cel.shape[0] for cel in clist])], dtype=np.int ) # An alternative fix using OpenCV's findContours routine---doesn't solve the problem # contours, hierarchy = cv.findContours(np.asarray(mask, dtype=np.uint8), cv.RETR_LIST, cv.CHAIN_APPROX_NONE) # cvec = np.asarray(contours[np.argmax([cel.shape[0] for cel in contours])], dtype=np.int) # cvec = cvec.reshape((cvec.shape[0], cvec.shape[2])) # cvec = cvec[::-1, [1,0]] # Sort boundary pixels in clockwise direction and switch (x, y) coordinates # Lvec = compute_discrete_arc_length(cvec) # c = create_arc_length_image(mask.shape, cvec, Lvec) # plt.figure() # plt.imshow(c, 'gray', vmin=-Lvec[-1], vmax=Lvec[-1]) # plt.plot(origin[1], origin[0], 'or') # # plt.show() # Adjust the origin of the contour: # on the discrete contour cvec, find the closest point to the origin, # then apply a circular shift to cvec to make this point the first one. 
n0 = np.argmin(np.linalg.norm(cvec - origin, axis=1)) cvec = np.roll(cvec, -n0, axis=0) # Compute the discrete arc length along the contour Lvec = compute_discrete_arc_length(cvec) # Create an image of the contour where the intensity is the arc length arc = create_arc_length_image(mask.shape, cvec, Lvec) # Compute the feature transform of this image: # for each pixel position, we get the coordinates of the closest pixel on the contour F = distance_transform_edt( -1 == arc, return_distances=False, return_indices=True ) # Fill array with arc lengths of closest points on the contour # L = np.zeros(c.shape) # for u in range(c.shape[0]): # for v in range(c.shape[1]): # L[u, v] = c[F[0, u, v], F[1, u, v]] # gridx, gridy = np.meshgrid(range(c.shape[1]), range(c.shape[0])) # L = c[F[0,:,:][gridy, gridx], F[1,:,:][gridy, gridx]] L = arc[F[0, :, :], F[1, :, :]] # Create sampling windows for the j-th layer if compute_num_win: I.append(int(math.ceil(Lvec[-1] / width))) w_borders = np.linspace(0, Lvec[-1], I[j] + 1) for i in range(I[j]): # w[-1].append(np.where(mask & (s1[i] <= L) & (L < s1[i+1]) & (b[0] <= D) & (D < b[1]))) w[-1].append( np.where( mask & (w_borders[i] <= L) & (L < w_borders[i + 1]) & (b[j] <= D_main) & (D_main < b[j + 1]) ) ) # plt.figure() # plt.imshow(w[j][i]) # plt.show() # # Compute positions on the contour that will be used for the displacement estimation # if j == 0: # t = define_contour_positions(Lvec, I[0], cvec, c_main) return w, J, I
5,351,301
def extractsms(htmlsms):
    """ extractsms -- extract SMS messages from BeautifulSoup tree of Google Voice SMS HTML.

    Output is a list of dictionaries, one per message.
    """
    msgitems = []                                 # accum message items here
    # Extract all conversations by searching for a DIV with an ID at top level.
    tree = BeautifulSoup.BeautifulSoup(htmlsms)   # parse HTML into tree
    conversations = tree.findAll("div", attrs={"id": True}, recursive=False)
    for conversation in conversations:
        # For each conversation, extract each row, which is one SMS message.
        rows = conversation.findAll(attrs={"class": "gc-message-sms-row"})
        for row in rows:                          # for all rows
            # For each row, which is one message, extract all the fields.
            msgitem = {"id": conversation["id"]}  # tag this message with conversation ID
            spans = row.findAll("span", attrs={"class": True}, recursive=False)
            for span in spans:                    # for all spans in row
                cl = span["class"].replace('gc-message-sms-', '')
                msgitem[cl] = (" ".join(span.findAll(text=True))).strip()  # put text in dict
            msgitems.append(msgitem)              # add msg dictionary to list
    return msgitems
5,351,302
def load_ch_wubi_dict(dict_path=e2p.E2P_CH_WUBI_PATH):
    """Load Chinese to Wubi Dictionary.

    Parameters
    ----------
    dict_path : str
        The absolute path to the chinese2wubi dictionary.
        By default, it is E2P_CH_WUBI_PATH.

    Returns
    -------
    dict : Dictionary
        a mapping between Chinese to Wubi Code
    """
    return load_dict(dict_path)
5,351,303
def middle_flow(middle_inputs: Tensor) -> Tensor:
    """
    Middle flow
    Implements the second of the three broad parts of the model

    :param middle_inputs: Tensor output generated by the Entry Flow, having shape
        [*, new_rows, new_cols, 728]
    :return: Output tensor of shape [*, new_rows, new_cols, 728]
    """
    # Block 4 - Conv B (Green)
    middle_outputs = middle_inputs
    for _ in range(8):
        res = middle_outputs
        for _ in range(3):
            middle_outputs = separable_convolutional_unit(middle_outputs, 728)
        middle_outputs = Add()([res, middle_outputs])
    return middle_outputs
5,351,304
async def test_button_failure( hass: HomeAssistant, load_int: ConfigEntry, monkeypatch: MonkeyPatch, get_data: SensiboData, ) -> None: """Test the Sensibo button fails.""" state_button = hass.states.get("button.hallway_reset_filter") with patch( "homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data", return_value=get_data, ), patch( "homeassistant.components.sensibo.util.SensiboClient.async_reset_filter", return_value={"status": "failure"}, ): with raises(HomeAssistantError): await hass.services.async_call( BUTTON_DOMAIN, SERVICE_PRESS, { ATTR_ENTITY_ID: state_button.entity_id, }, blocking=True, )
5,351,305
def feature_extraction(sample_index, labels, baf, lrr, rawcopy_pred, data_shape, margin=10000, pad_val=-2): """ Extract features at sample index :param sample_index: sample index :param labels: break point labels :param baf: b-allele frequency values :param lrr: log r ratio values :param rawcopy_pred: rawcop predictions :param data_shape: shape of the data :param margin: margin to use :param pad_val: padding value for windows appearing on start or end of data sequence :return: """ window_size = margin * 4 if sample_index < margin * 2: running_idx = margin * 2 - sample_index running_idx2 = margin * 2 + sample_index if running_idx2 >= len(baf): running_idx2 = len(baf) - 1 ix = range(sample_index, sample_index + margin) baf_ix = range(0, running_idx2) baf_ = baf[baf_ix] baf = np.pad(baf_, (running_idx, 0), 'constant', constant_values=pad_val) lrr_ = lrr[baf_ix] lrr = np.pad(lrr_, (running_idx, 0), 'constant', constant_values=pad_val) elif sample_index + margin * 2 > data_shape[0]: running_idx = sample_index - margin * 2 ix = range(sample_index - margin, data_shape[0]) baf_ix = range(running_idx, data_shape[0]) baf_ = baf[baf_ix] baf = np.pad(baf_, (0, running_idx), 'constant', constant_values=pad_val) lrr_ = lrr[baf_ix] lrr = np.pad(lrr_, (0, running_idx), 'constant', constant_values=pad_val) else: ix = range(sample_index - margin, sample_index + margin) baf_ix = range(sample_index - margin * 2, sample_index + margin * 2) baf = baf[baf_ix] lrr = lrr[baf_ix] label = [] for l in labels[baf_ix]: if label == []: label.append(l) elif l != label[-1]: label.append(l) rc_pred = [] for l in rawcopy_pred[baf_ix]: if rc_pred == []: rc_pred.append(l) elif l != label[-1]: rc_pred.append(l) assert baf.shape[0] == window_size assert lrr.shape[0] == window_size feat = np.vstack((baf, lrr)) return feat, rc_pred, label, ix
5,351,306
def get_filename(file_fullpath):
    """
    Returns the filename without the full path
    :param file_fullpath:
    :return: Returns the filename
    """
    filename = file_fullpath.split("/")[-1].split(".")[0]
    return filename
5,351,307
def test_colocalization(col, row, alt):
    """
    Test colocalization function using rpc
    """
    data_folder = data_path()
    id_scene = "P1BP--2018122638935449CP"
    file_dimap = os.path.join(data_folder, f"rpc/PHRDIMAP_{id_scene}.XML")
    fctrat = RPC.from_dimap_v1(file_dimap)

    row_coloc, col_coloc, _ = coloc_rpc(fctrat, fctrat, row, col, alt)

    assert row == pytest.approx(row_coloc, abs=1e-1)
    assert col == pytest.approx(col_coloc, abs=1e-1)
5,351,308
def create_and_assign_household(humans_with_same_house, housetype, conf, city, allocated_humans): """ Creates a residence and allocates humans in `humans_with_same_house` to the same. Args: humans_with_same_house (list): a list of `Human` objects which are to be allocated to the same residence of type `type`. housetype (HouseType): type of allocation conf (dict): yaml configuration of the experiment city (covid19sim.location.City): simulator's city object allocated_humans (list): a list of humans that have been allocated a household Returns: allocated_humans (list): a list of humans that have been allocated a household """ assert all(human not in allocated_humans for human in humans_with_same_house), f"reassigning household to human" res = Household( env=city.env, rng=np.random.RandomState(city.rng.randint(2 ** 16)), conf=conf, name=f"HOUSEHOLD:{len(city.households)}", location_type="HOUSEHOLD", lat=city.rng.randint(*city.x_range), lon=city.rng.randint(*city.y_range), area=None, capacity=None, ) for human in humans_with_same_house: allocated_humans = _assign_household(human, res, allocated_humans) res.allocation_type = housetype city.households.add(res) return allocated_humans
5,351,309
def Rdf2Marc(**kwargs): """Runs rdf2marc on a BF Instance URL""" task_instance = kwargs["task_instance"] instance_uri = task_instance.xcom_pull(task_ids="sqs-sensor") instance_path = urlparse(instance_uri).path instance_id = path.split(instance_path)[-1] sinopia_env = kwargs.get("sinopia_env", "dev") rdf2marc_lambda = f"{getenv('RDF2MARC_LAMBDA')}_{sinopia_env.upper()}" s3_bucket = f"{getenv('MARC_S3_BUCKET')}_{sinopia_env.upper()}" s3_record_path = f"airflow/{instance_id}/record" marc_path = f"{s3_record_path}.mar" marc_text_path = f"{s3_record_path}.txt" marc_err_path = f"{s3_record_path}.err" lambda_hook = AwsLambdaHook( rdf2marc_lambda, log_type="None", qualifier="$LATEST", invocation_type="RequestResponse", config=None, aws_conn_id="aws_lambda_connection", ) params = { "instance_uri": instance_uri, "bucket": s3_bucket, "marc_path": marc_path, "marc_txt_path": marc_text_path, "error_path": marc_err_path, } result = lambda_hook.invoke_lambda(payload=json.dumps(params)) print(f"RESULT = {result['StatusCode']}") if result["StatusCode"] == 200: return instance_id logging.error( f"RDF2MARC conversion failed for {instance_uri}: {result['FunctionError']}" ) raise Exception()
5,351,310
def make_positions(tensor, padding_idx):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The series of casts and type-conversions here are carefully
    # balanced to both work with ONNX export and XLA. In particular XLA
    # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
    # how to handle the dtype kwarg in cumsum.
    mask = tensor.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
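A minimal usage sketch for the function above (not part of the original snippet; it assumes torch is imported and uses padding_idx=1):

import torch

padded = torch.tensor([[5, 7, 9, 1, 1],
                       [4, 1, 1, 1, 1]])  # 1 is the padding index
print(make_positions(padded, padding_idx=1))
# tensor([[2, 3, 4, 1, 1],
#         [2, 1, 1, 1, 1]])  -> positions start at padding_idx + 1, pads stay at padding_idx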
5,351,311
def init(): """Top level command handler.""" @click.command() @click.option('--policy-servers', type=cli.LIST, required=True, help='Warpgate policy servers') @click.option('--service-principal', type=str, default='host', help='Warpgate service principal.') @click.option('--policy', type=str, required=True, envvar='WARPGATE_POLICY', help='Warpget policy to use') @click.option('--tun-dev', type=str, required=True, help='Device to use when establishing tunnels.') @click.option('--tun-addr', type=str, required=False, help='Local IP address to use when establishing tunnels.') def warpgate(policy_servers, service_principal, policy, tun_dev, tun_addr): """Run warpgate connection manager. """ _LOGGER.info( 'Launch client => %s, tunnel: %s[%s], policy: %s, principal: %s', policy_servers, tun_dev, tun_addr, policy, service_principal, ) # Never exits client.run_client( policy_servers, service_principal, policy, tun_dev, tun_addr ) return warpgate
5,351,312
def create_color_visualizer(renderer, file_name, scalar_range): """Create color visualizer""" # Initialize variables reader = vtkStructuredGridReader() mapper = vtkDataSetMapper() actor = vtkActor() # Set reader reader.SetFileName(file_name) # Set lookup table lookup_table = create_lookup_table(1000, (0.0, 1.0)) # Set mapper mapper.SetInputConnection(reader.GetOutputPort()) mapper.SetLookupTable(lookup_table) mapper.SetScalarRange(scalar_range) mapper.ScalarVisibilityOn() # Set actor actor.SetMapper(mapper) # Add actor to the window renderer renderer.AddActor(actor)
5,351,313
def load(file):
    """unpickle an object from a file"""
    pik = Unpickler(file)
    pik._main = _main_module
    obj = pik.load()
    if type(obj).__module__ == getattr(_main_module, '__name__', '__main__'):
        # point obj class to main
        try:
            obj.__class__ = getattr(pik._main, type(obj).__name__)
        except (AttributeError, TypeError):
            pass  # defined in a file
    #_main_module.__dict__.update(obj.__dict__) #XXX: should update globals ?
    return obj
5,351,314
def verify_password(password: str, salt: str, key: str) -> bool:
    """
    Verify the given password against the given salt and key.

    :param password: The password to check.
    :param salt: The salt to use. Should be encoded in ascii.
    :param key: The key to use. Should be encoded in ascii.
    :returns: True if given a valid password, False otherwise.
    """
    LOGGER.debug("Verifying password.")
    new_key = hashlib.pbkdf2_hmac(
        'sha256',
        password.encode('utf-8'),
        salt.encode('ascii'),
        100000
    )
    return binascii.hexlify(new_key).decode() == key
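A hedged companion sketch for verify_password (not from the original module): make_salt_and_key is a hypothetical helper introduced only for illustration, and the stand-in LOGGER mirrors the module-level logger the original relies on.

import os
import hashlib
import binascii
import logging

LOGGER = logging.getLogger(__name__)  # stand-in for the module-level logger assumed above

def make_salt_and_key(password):
    # Hypothetical helper: derive a salt/key pair with the same KDF parameters.
    salt = binascii.hexlify(os.urandom(16)).decode('ascii')
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'),
                              salt.encode('ascii'), 100000)
    return salt, binascii.hexlify(key).decode()

salt, key = make_salt_and_key("correct horse")
assert verify_password("correct horse", salt, key)
assert not verify_password("wrong password", salt, key)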
5,351,315
def _post_patch_ecr(version, repo, account, region, filepath='dockerfiles'): """ routine to build the docker image and push it to ECR """ if not os.path.isdir(repo + '/' + filepath): return print('Processing docker image...') account_region = account + '.dkr.ecr.' + region + '.amazonaws.com' # generic bash commands to be modified to correct version and account information for fn in os.listdir(repo + '/' + filepath): print(' processing docker %s' % fn) tag = account_region + '/' + fn + ':' + version path = repo + '/' + filepath + '/' + fn image = """ aws ecr get-login-password --region REGION | docker login --username AWS --password-stdin ACCOUNT_REGION docker build -t TAG PATH --no-cache docker push TAG """ # note that we are ALWAYS doing no-cache builds so that we can get updated base images whenever applicable cmd = image.replace('ACCOUNT_REGION', account_region).replace('REGION', region).replace('TAG', tag).replace('PATH', path) subprocess.check_call(cmd, shell=True)
5,351,316
def setup(app):
    """Set up the Sphinx extension."""
    app.add_config_value(
        name="doctr_versions_menu_conf",
        default={},
        rebuild="html",
    )
    app.connect('builder-inited', ext.add_versions_menu_js_file)
    app.connect('build-finished', ext.cleanup)
    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
5,351,317
def head(file):
    """Returns the first/head line of the file"""
    first = ''
    if os.path.isfile(file):
        with open_file_read(file) as f_in:
            try:
                first = f_in.readline().rstrip()
            except UnicodeDecodeError:
                pass
        return first
    else:
        raise AppArmorException(_('Unable to read first line from %s: File Not Found') % file)
5,351,318
def get_help_recursive(group, ctx, commands):
    """
    Returns help for arbitrarily nested subcommands of the given click.Group.
    """
    try:
        command_name = commands.pop(0)
        group = group.get_command(ctx, command_name)
        if not group:
            raise click.ClickException('Invalid command: {}'.format(command_name))
    except IndexError:
        # end of subcommand chain
        return group.get_help(ctx)
    except AttributeError:
        # group is actually a command with no children
        return group.get_help(ctx)
    return get_help_recursive(group, ctx, commands)
5,351,319
def clump_tracker(fprefix, param=None, directory=None, nsmooth=32, verbose=True):
    """
    Finds and tracks clumps over a simulation with multiple time steps and
    calculates various physical properties of the clumps.

    Runs all the steps necessary to find/track clumps, these are:

        get_fnames
        pFind_clumps
        pClump_properties
        pLink2
        multilink
        build_clumps

    If the iord property is not found, the linking will only work if the
    number of particles remains constant through the simulation

    **ARGUMENTS**

    fprefix : str
        Prefix of the simulation outputs
    param : str (recommended)
        Filename of a .param file for the simulation
    directory : str (optional)
        Directory to search through. Default is current working directory
    nsmooth : int (optional)
        Number of nearest neighbors used for particle smoothing in the
        simulation. This is used in the definition of a density threshold
        for clump finding.
    verbose : bool (optional)
        Verbosity flag. Default is True

    **RETURNS**

    clump_list : list
        A list containing dictionaries for all clumps found in the simulation
        See clump_properties for a list of the properties calculated for clumps
    """
    # Get a list of all snapshot files
    fnames = get_fnames(fprefix, directory)
    nfiles = len(fnames)

    # Run the clump (halo) finder
    if verbose:
        print "\n\nRunning clump finder on {} files\n\n".format(nfiles)
    clumpnum_list = pFind_clumps(fnames, nsmooth, param, verbose=verbose)
    nclumps = np.zeros(nfiles, dtype=int)

    for i, clumpnums in enumerate(clumpnum_list):
        nclumps[i] = clumpnums.max()

    if nclumps.max() <= 0:
        if verbose:
            print 'No clumps found'
        return []

    # Calculate the physical properties of the clumps
    if verbose:
        print "\n\nCalculating the physical properties of clumps\n\n"
    properties = pClump_properties(fnames, clumpnum_list)

    # Link clumps on consecutive time-steps
    if verbose:
        print "\n\nLinking Clumps\n\n"
    link_list = pLink2(properties)

    # Link on multiple time-steps
    multilink_list = multilink(link_list)

    # Build the clumps
    clump_list = build_clumps(multilink_list, properties, fnames, param)

    return clump_list
5,351,320
def OptimizeGraph(config_proto, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None, strip_default_attributes=False): """Optimize the provided metagraph. For best results, the signature_def field in `metagraph` should be populated with information about input (feed) and output (fetch) tensors. Args: config_proto: a ConfigProto protobuf. metagraph: a MetagraphDef protobuf. verbose: whether to log optimization results. graph_id: a string identifying this graph. cluster: a grappler cluster object representing hardware resources available to run this graph. strip_default_attributes: whether graph node attributes having default values should be removed after all the optimization passes. This option is useful if the resulting graph will be executed by an older process that might not know some of the recently added attributes. """ if not isinstance(config_proto, config_pb2.ConfigProto): raise TypeError('Argument `config_proto` should be a tf.ConfigProto, ' f'received type: {type(config_proto).__name__}') if cluster is not None: out_graph = tf_opt.TF_OptimizeGraph(cluster.tf_cluster, config_proto.SerializeToString(), metagraph.SerializeToString(), verbose, graph_id, strip_default_attributes) else: # Currently Grappler assumes no more than 1 sessions alive globally. # See comments on SingleMachine::Provision(), hence we use the following # lock to prevent concurrent access to the following code. with _OPTIMIZE_GRAPH_CLUSTER_LOCK: cluster = gcluster.Cluster() try: out_graph = tf_opt.TF_OptimizeGraph(cluster.tf_cluster, config_proto.SerializeToString(), metagraph.SerializeToString(), verbose, graph_id, strip_default_attributes) finally: # Force the cleanup instead of waiting on python GC to cleanup the # temporary cluster we've created. Otherwise subsequent calls might # not have a clean slate because GC may not have run yet. cluster.Shutdown() return graph_pb2.GraphDef().FromString(out_graph)
5,351,321
def files_by_date():
    """TODO
    ---
    responses:
      '200':
        description: TODO
    """
    return redirect("https://explorer.ooni.org/search", 301)
5,351,322
def parse_imei(msg):
    """Parse an IMEI (in BCD format) into ASCII format."""
    imei = ''
    for octet in msg[1:]:
        imei += imei_parse_nibble(ord(octet) & 0x0f)
        imei += imei_parse_nibble(ord(octet) >> 4)
    return imei
5,351,323
def get_blender_frame_time(skeleton, frame_id, rate, time_scale, actor_id):
    """Goes from multi-actor integer frame_id to modded blender float time."""
    # stays within video frame limits
    frame_id2 = skeleton.mod_frame_id(frame_id=frame_id)  # type: int
    time_ = skeleton.get_time(frame_id)
    if actor_id > 0:
        time_ = frame_id2 / rate
    print('time is {} for {} ({}), orig time: {}, rate: {}, '
          'time_scale: {}'
          .format(time_, frame_id, frame_id2, skeleton.get_time(frame_id),
                  rate, time_scale))
    frame_time = time_ * time_scale
    return frame_time
5,351,324
def parse_metar(metar_text, year, month, station_metadata=station_info): """Parse a METAR report in text form into a list of named tuples. Parameters ---------- metar_text : str The METAR report station_metadata : dict Mapping of station identifiers to station metadata year : int Reported year of observation for constructing 'date_time' month : int Reported month of observation for constructing 'date_time' Returns ------- metar : namedtuple Named tuple of parsed METAR fields Notes ----- Returned data has named tuples with the following attributes: * 'station_id': Station Identifier (ex. KLOT) * 'latitude': Latitude of the observation, measured in degrees * 'longitude': Longitude of the observation, measured in degrees * 'elevation': Elevation of the observation above sea level, measured in meters * 'date_time': Date and time of the observation, datetime object * 'wind_direction': Direction the wind is coming from, measured in degrees * 'wind_speed': Wind speed, measured in knots * 'wind_gust': Wind gust, measured in knots * 'current_wx1': Current weather (1 of 3) * 'current_wx2': Current weather (2 of 3) * 'current_wx3': Current weather (3 of 3) * 'skyc1': Sky cover (ex. FEW) * 'skylev1': Height of sky cover 1, measured in feet * 'skyc2': Sky cover (ex. OVC) * 'skylev2': Height of sky cover 2, measured in feet * 'skyc3': Sky cover (ex. FEW) * 'skylev3': Height of sky cover 3, measured in feet * 'skyc4': Sky cover (ex. CLR) * 'skylev4:': Height of sky cover 4, measured in feet * 'cloudcover': Cloud coverage measured in oktas, taken from maximum of sky cover values * 'temperature': Temperature, measured in degrees Celsius * 'dewpoint': Dewpoint, measured in degrees Celsius * 'altimeter': Altimeter value, measured in inches of mercury * 'current_wx1_symbol': Current weather symbol (1 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx2_symbol': Current weather symbol (2 of 3), WMO integer code from [WMO306]_ Attachment IV * 'current_wx3_symbol': Current weather symbol (3 of 3), WMO integer code from [WMO306]_ Attachment IV * 'visibility': Visibility distance, measured in meters * 'remarks': Remarks (unparsed) in the report """ from ..plots.wx_symbols import wx_code_to_numeric # Decode the data using the parser (built using Canopy) the parser utilizes a grammar # file which follows the format structure dictated by the WMO Handbook, but has the # flexibility to decode the METAR text when there are missing or incorrectly # encoded values tree = parse(metar_text) # Station ID which is used to find the latitude, longitude, and elevation station_id = tree.siteid.text.strip() # Extract the latitude and longitude values from 'master' dictionary try: info = station_metadata[station_id] lat = info.latitude lon = info.longitude elev = info.altitude except KeyError: lat = np.nan lon = np.nan elev = np.nan # Set the datetime, day, and time_utc try: day_time_utc = tree.datetime.text.strip() day = int(day_time_utc[0:2]) hour = int(day_time_utc[2:4]) minute = int(day_time_utc[4:6]) date_time = datetime(year, month, day, hour, minute) except ValueError: date_time = np.nan # Set the wind values wind_units = 'kts' try: # If there are missing wind values, set wind speed and wind direction to nan if ('/' in tree.wind.text) or (tree.wind.text == 'KT') or (tree.wind.text == ''): wind_dir = np.nan wind_spd = np.nan # If the wind direction is variable, set wind direction to nan but keep the wind speed else: wind_spd = float(tree.wind.wind_spd.text) if 'MPS' in tree.wind.text: wind_units = 'm/s' 
wind_spd = units.Quantity(wind_spd, wind_units).m_as('knots') if (tree.wind.wind_dir.text == 'VRB') or (tree.wind.wind_dir.text == 'VAR'): wind_dir = np.nan else: wind_dir = int(tree.wind.wind_dir.text) # If there are any errors, return nan except ValueError: wind_dir = np.nan wind_spd = np.nan # Parse out the wind gust field if 'G' in tree.wind.text: wind_gust = units.Quantity(float(tree.wind.gust.text.strip()[1:]), wind_units).m_as('knots') else: wind_gust = np.nan # Handle visibility try: if tree.vis.text.endswith('SM'): visibility = 0 # Strip off the SM and any whitespace around the value and any leading 'M' vis_str = tree.vis.text[:-2].strip().lstrip('M') # Case of e.g. 1 1/4SM if ' ' in vis_str: whole, vis_str = vis_str.split(maxsplit=1) visibility += int(whole) # Handle fraction regardless if '/' in vis_str: num, denom = vis_str.split('/', maxsplit=1) visibility += int(num) / int(denom) else: # Should be getting all cases of whole number without fraction visibility += int(vis_str) visibility = units.Quantity(visibility, 'miles').m_as('meter') # CAVOK means vis is "at least 10km" and no significant clouds or weather elif 'CAVOK' in tree.vis.text: visibility = 10000 elif not tree.vis.text or tree.vis.text.strip() == '////': visibility = np.nan else: # Only worry about the first 4 characters (digits) and ignore possible 'NDV' visibility = int(tree.vis.text.strip()[:4]) # If there are any errors, return nan except ValueError: visibility = np.nan # Set the weather symbols # If the weather symbol is missing, set values to nan current_wx = [] current_wx_symbol = [] if tree.curwx.text.strip() not in ('', '//', 'NSW'): current_wx = tree.curwx.text.strip().split() # Handle having e.g. '+' and 'TSRA' parsed into separate items if current_wx[0] in ('-', '+') and current_wx[1]: current_wx[0] += current_wx[1] current_wx.pop(1) current_wx_symbol = wx_code_to_numeric(current_wx).tolist() while len(current_wx) < 3: current_wx.append(np.nan) while len(current_wx_symbol) < 3: current_wx_symbol.append(0) # Set the sky conditions skyc = [np.nan] * 4 skylev = [np.nan] * 4 if tree.skyc.text[1:3] == 'VV': skyc[0] = 'VV' level = tree.skyc.text.strip()[2:5] skylev[0] = np.nan if '/' in level else 100 * int(level) else: for ind, part in enumerate(tree.skyc.text.strip().split(maxsplit=3)): cover = part[:3] level = part[3:6] # Strips off any ending text like in FEW017CB if '/' not in cover: skyc[ind] = cover if level and '/' not in level: with contextlib.suppress(ValueError): skylev[ind] = float(level) * 100 # Set the cloud cover variable (measured in oktas) if 'OVC' in tree.skyc.text or 'VV' in tree.skyc.text: cloudcover = 8 elif 'BKN' in tree.skyc.text: cloudcover = 6 elif 'SCT' in tree.skyc.text: cloudcover = 4 elif 'FEW' in tree.skyc.text: cloudcover = 2 elif ('SKC' in tree.skyc.text or 'NCD' in tree.skyc.text or 'NSC' in tree.skyc.text or 'CLR' in tree.skyc.text or 'CAVOK' in tree.vis.text): cloudcover = 0 else: cloudcover = 10 # Set the temperature and dewpoint temp = np.nan dewp = np.nan if tree.temp_dewp.text and tree.temp_dewp.text != ' MM/MM': with contextlib.suppress(ValueError): temp = float(tree.temp_dewp.temp.text[-2:]) if 'M' in tree.temp_dewp.temp.text: temp *= -1 with contextlib.suppress(ValueError): dewp = float(tree.temp_dewp.dewp.text[-2:]) if 'M' in tree.temp_dewp.dewp.text: dewp *= -1 # Set the altimeter value and sea level pressure if tree.altim.text: val = float(tree.altim.text.strip()[1:5]) altim = val / 100 if val > 1100 else units.Quantity(val, 'hPa').m_as('inHg') else: altim = np.nan 
# Strip off extraneous stuff off the remarks section remarks = tree.remarks.text.lstrip().rstrip('= ') if remarks.startswith('RMK'): remarks = remarks[3:].strip() # Returns a named tuple with all the relevant variables return Metar(station_id, lat, lon, elev, date_time, wind_dir, wind_spd, wind_gust, visibility, current_wx[0], current_wx[1], current_wx[2], skyc[0], skylev[0], skyc[1], skylev[1], skyc[2], skylev[2], skyc[3], skylev[3], cloudcover, temp, dewp, altim, current_wx_symbol[0], current_wx_symbol[1], current_wx_symbol[2], remarks)
5,351,325
def upload_ignition_files_to_s3(local_folder, s3_bucket, session: SessionProxy):
    """
    Push Ignition files up to S3

    :param session: Boto SessionProxy
    :param local_folder: The folder to upload
    :param s3_bucket: Name of the S3 Bucket
    :return None:
    """
    files_to_upload = ['auth/kubeconfig', 'auth/kubeadmin-password',
                       'master.ign', 'worker.ign', 'bootstrap.ign']
    for file in files_to_upload:
        s3_path = os.path.join(os.path.basename(local_folder), file)
        local_path = os.path.join(local_folder, file)
        upload_file_to_s3(s3_path, local_path, s3_bucket, session)
5,351,326
def update_facemap_material(self, context):
    """ Assign the updated material to all faces belonging to active facemap """
    set_material_for_active_facemap(self.material, context)
    return None
5,351,327
def calculate_accuracy(y_true, y_pred):
    """Calculates the accuracy of the model.

    Arguments:
        y_true {numpy.array} -- the true labels corresponding to each input
        y_pred {numpy.array} -- the model's predictions

    Returns:
        accuracy {str} -- the accuracy of the model (%)
    """
    correctpred, total = 0, 0
    for index in range(len(y_pred)):
        if y_pred[index] == y_true[index]:
            correctpred = correctpred + 1
        total = total + 1

    return 'accuracy=' + str((correctpred * 100) / total)
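A small usage sketch (not from the original source):

y_true = [1, 0, 1, 1, 0]
y_pred = [1, 0, 0, 1, 0]
print(calculate_accuracy(y_true, y_pred))  # -> 'accuracy=80.0'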
5,351,328
def resolve_diff_args(args): """Resolve ambiguity of path vs base/remote for git: Cases: - No args: Use defaults - One arg: Either base or path, check with is_gitref. - Two args or more: Check if first two are base/remote by is_gitref """ base = args.base remote = args.remote paths = getattr(args, 'paths', None) if not paths: paths = None if remote is None and paths is None: # One arg only: if not is_gitref(base): paths = base base = 'HEAD' # Two or more args: elif paths is None: # Two exactly # - Two files (not git-mode, do nothing) # - Base gitref one file (remote=None, path = file) # - Base gitref remote gitref (do nothing) if is_gitref(base) and not is_gitref(remote): paths = remote remote = None elif base and remote: # Three or more if not is_gitref(base): paths = [base, remote] + paths base = remote = None elif is_gitref(base) and not is_gitref(remote): paths = [remote] + paths remote = None return base, remote, paths
5,351,329
def get_coco_metrics_from_gt_and_det(groundtruth_dict, detection_boxes_list, category=''):
    """
    Get COCO metrics given a groundtruth dictionary and the list of detections.
    """
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(detection_boxes_list)
    box_evaluator = coco_tools.COCOEvalWrapper(coco_wrapped_groundtruth,
                                               coco_wrapped_detections,
                                               agnostic_mode=False)
    box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
        include_metrics_per_category=False,
        all_metrics_per_category=False,
        super_categories=None
    )
    box_metrics.update(box_per_category_ap)
    box_metrics = {'DetectionBoxes_' + category + key: value
                   for key, value in iter(box_metrics.items())}
    return box_metrics
5,351,330
def test_notify_emby_plugin_notify(mock_post, mock_get, mock_logout, mock_login, mock_sessions): """ API: NotifyEmby.notify() """ # Disable Throttling to speed testing plugins.NotifyBase.request_rate_per_sec = 0 req = requests.Request() req.status_code = requests.codes.ok req.content = '' mock_get.return_value = req mock_post.return_value = req # This is done so we don't obstruct our access_token and user_id values mock_login.return_value = True mock_logout.return_value = True mock_sessions.return_value = {'abcd': {}} obj = Apprise.instantiate('emby://l2g:l2gpass@localhost?modal=False') assert isinstance(obj, plugins.NotifyEmby) assert obj.notify('title', 'body', 'info') is True obj.access_token = 'abc' obj.user_id = '123' # Test Modal support obj = Apprise.instantiate('emby://l2g:l2gpass@localhost?modal=True') assert isinstance(obj, plugins.NotifyEmby) assert obj.notify('title', 'body', 'info') is True obj.access_token = 'abc' obj.user_id = '123' # Test our exception handling for _exception in REQUEST_EXCEPTIONS: mock_post.side_effect = _exception mock_get.side_effect = _exception # We'll fail to log in each time assert obj.notify('title', 'body', 'info') is False # Disable Exceptions mock_post.side_effect = None mock_get.side_effect = None # Our login flat out fails if we don't have proper parseable content mock_post.return_value.content = u'' mock_get.return_value.content = mock_post.return_value.content # KeyError handling mock_post.return_value.status_code = 999 mock_get.return_value.status_code = 999 assert obj.notify('title', 'body', 'info') is False # General Internal Server Error mock_post.return_value.status_code = requests.codes.internal_server_error mock_get.return_value.status_code = requests.codes.internal_server_error assert obj.notify('title', 'body', 'info') is False mock_post.return_value.status_code = requests.codes.ok mock_get.return_value.status_code = requests.codes.ok mock_get.return_value.content = mock_post.return_value.content # Disable the port completely obj.port = None assert obj.notify('title', 'body', 'info') is True # An Empty return set (no query is made, but notification will still # succeed mock_sessions.return_value = {} assert obj.notify('title', 'body', 'info') is True # Tidy our object del obj
5,351,331
def createDataset(dataPath,dStr,sigScale=1): """ dStr from ["20K", "1M", "10M"] """ print("Loading D1B dataset...") ft1_d = loadD1B(dataPath,dStr,w=40) if dStr=="20K": ft1_d = ft1_d[:10000,:] print("Running PCA on D1B") pcaD1B = PCA(n_components=ft1_d.shape[1],random_state=0) ft1_d = pcaD1B.fit_transform(ft1_d) print("Loading FAS dataset") ft1_f, ft2_f, gt_f, pos1_f, pos2_f = loadFAS(dataPath) if dStr=="20K": ft1_f = ft1_f[:10000,:] ft2_f = ft2_f[:10000,:] print("Running PCA on FAS") pcaFAS = PCA(n_components=ft1_d.shape[1],random_state=0) ft1_f = pcaFAS.fit_transform(ft1_f) ft2_f = pcaFAS.transform(ft2_f) print("Re-scaling Variance of D1B using FAS data") ft1_d = np.std(ft1_f,axis=0)*ft1_d/np.std(ft1_d,axis=0) print("Computing a new version of D1B to be used as a query traverse") ftDiff = calcChange(dataPath) noiseVar = np.var(ftDiff,axis=0) noiseMean = np.mean(ftDiff,axis=0) print("\t Incorporating the 'change' from FAS along with some noise") ft1_n = addNoiseToFt(ft1_d,noiseMean,noiseVar,sigScale) print("Concatenating the two datasets") ft1 = np.concatenate([ft1_d,ft1_f],axis=0) ft2 = np.concatenate([ft1_n,ft2_f],axis=0) del ft1_d, ft1_n, ft1_f, ft2_f return ft1, ft2
5,351,332
def generate_interblock_leader():
    """Generates the leader between normal blocks"""
    return b'\x55' * 0x2
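A tiny usage sketch (not in the original): the leader is two 0x55 bytes, which happen to render as ASCII 'U' characters.

leader = generate_interblock_leader()
print(leader)       # b'UU'  (i.e. b'\x55\x55')
print(len(leader))  # 2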
5,351,333
def discover_handlers(entrypoint_group_name="databroker.handlers", skip_failures=True): """ Discover handlers via entrypoints. Parameters ---------- entrypoint_group_name: str Default is 'databroker.handlers', the "official" databroker entrypoint for handlers. skip_failures: boolean True by default. Errors loading a handler class are converted to warnings if this is True. Returns ------- handler_registry: dict A suitable default handler registry """ group = entrypoints.get_group_named(entrypoint_group_name) group_all = entrypoints.get_group_all(entrypoint_group_name) if len(group_all) != len(group): # There are some name collisions. Let's go digging for them. for name, matches in itertools.groupby(group_all, lambda ep: ep.name): matches = list(matches) if len(matches) != 1: winner = group[name] warnings.warn( f"There are {len(matches)} entrypoints for the " f"databroker handler spec {name!r}. " f"They are {matches}. The match {winner} has won the race." ) handler_registry = {} for name, entrypoint in group.items(): try: handler_class = entrypoint.load() except Exception as exc: if skip_failures: warnings.warn( f"Skipping {entrypoint!r} which failed to load. " f"Exception: {exc!r}" ) continue else: raise handler_registry[name] = handler_class return handler_registry
5,351,334
def create_highway_layer(highway_type, num_layer, unit_dim, window_size, activation, dropout, num_gpus, default_gpu_id, regularizer, random_seed, trainable): """create highway layer""" scope = "highway/{0}".format(highway_type) if highway_type == "highway": highway_layer = StackedHighway(num_layer=num_layer, unit_dim=unit_dim, activation=activation, dropout=dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=regularizer, random_seed=random_seed, trainable=trainable) elif highway_type == "conv_highway": highway_layer = StackedHighway(num_layer=num_layer, num_filter=unit_dim, window_size=window_size, activation=activation, dropout=dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, regularizer=regularizer, random_seed=random_seed, trainable=trainable) else: raise ValueError("unsupported dense type {0}".format(highway_type)) return highway_layer
5,351,335
def compute_metrics(y_true, y_predicted, y_prob=None):
    """compute metrics for the predicted labels against ground truth

    @args:
        y_true: the ground truth label
        y_predicted: the predicted label
        y_prob: probability of the predicted label

    @returns:
        various metrics: F1-score, AUC of ROC, brier-score, also plots AUC
    """
    # plot AUC
    if y_prob:
        fpr, tpr, _ = roc_curve(y_true, y_prob)
        auc = roc_auc_score(y_true, y_prob)
        plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
        plt.legend(loc=4)
        plt.show()
        # brier = brier_score_loss((y_true, y_prob))

    # F1 score and brier score
    f1 = f1_score(y_true, y_predicted)

    # classification report
    plot_classification_report(classification_report(y_true, y_predicted))

    return f1
5,351,336
def session_store(decoy: Decoy) -> SessionStore:
    """Get a mock SessionStore interface."""
    return decoy.mock(cls=SessionStore)
5,351,337
def test_point_geometry_pass(pt_geo_plot, pd_gdf):
    """Check that the point geometry test recognizes correct points."""
    pt_geo_plot.assert_points(points_expected=pd_gdf)
    plt.close("all")
5,351,338
def get_json_signed(asn_metadata): """ Given an ASN.1 object conforming to the new ASN.1 metadata definitions derived from Snapshot*.asn1, return a Python dictionary containing the same information, conformant to TUF's standard data specification for Snapshot metadata (tuf.formats.SNAPSHOT_SCHEMA). TUF internally does not use the ASN.1, converting it in and out of the standard Python dictionary formats defined in tuf.formats. """ pydict_signed = {} # TODO: Normalize this function's interface: the asn_metadata given is # actually both 'signed' and 'signatures', which is strange since the # get_asn_signed function takes only the contents of the 'signed' entry, and # this function only returns the contents of a corresponding 'signed' entry. # (It is confusingly inconsistent to take the full object, return a converted # partial object, and have parallel naming and placement with a function that # takes and returns a partial object.) # This change has to percolate across all modules, however. asn_signed = asn_metadata['signed'] # This should be the argument instead of asn_metadata. # Should check this from the ASN, but... the ASN definitions don't actually # USE a type, so I'm entirely basing the type encoded on the filename. This # is bad, I think. Could it be a security issue to not sign the metadata type # in there? The metadata types are pretty distinct, but... it's still best to # fix this at some point. pydict_signed['_type'] = 'Snapshot' pydict_signed['expires'] = datetime.utcfromtimestamp( asn_signed['expires']).isoformat()+'Z' pydict_signed['version'] = int(asn_signed['version']) # Next, extract the fileinfo for each role file described in the ASN.1 # Snapshot metadata. snapshot_metadata = asn_signed['body']['snapshotMetadata'] number_of_target_role_files = int( snapshot_metadata['numberOfTargetRoleFiles']) asn_target_fileinfos = snapshot_metadata['targetRoleFileInfos'] pydict_fileinfos = {} # Copy the Targets and delegated roles fileinfos: for i in range(number_of_target_role_files): asn_role_fileinfo = asn_target_fileinfos[i] filename = str(asn_role_fileinfo['filename']) pydict_fileinfos[filename] = {'version': int(asn_role_fileinfo['version'])} # Add in the Root role fileinfo: # In the Python dictionary format for Snapshot metadata, these all exist in # one dictionary. filename = str(snapshot_metadata['rootRoleFileInfo']['filename']) version = int(snapshot_metadata['rootRoleFileInfo']['version']) length = int(snapshot_metadata['rootRoleFileInfo']['length']) if filename in pydict_fileinfos: raise tuf.Error('ASN1 Conversion failure for Snapshot role: duplicate ' 'fileinfo entries detected: filename ' + str(filename) + ' identified ' 'both as Root role and Targets role in Snapshot metadata.') # Populate the hashes in the fileinfo describing the Root role. hashes = {} for i in range(snapshot_metadata['rootRoleFileInfo']['numberOfHashes']): asn_hash_info = snapshot_metadata['rootRoleFileInfo']['hashes'][i] # This is how we'd extract the name of the hash function from the # enumeration (namedValues) that is in the class (HashFunction), indexed by # the underlying "value" of asn_hash_info. The [0] at the end selects # the string description from a 2-tuple of e.g. ('sha256', 1), where 1 is # the value in the enum. # TODO: Should probably make this its own function. 
The following should # work: # def translate_pyasn_enum_to_value(asn_enum_value): # return asn_enum_value.namedValues[asn_enum_value][0] # hashtype = asn_hash_info['function'].namedValues[asn_hash_info['function']] hashval = hex_from_octetstring(asn_hash_info['digest']) hashes[hashtype] = hashval # Finally, add all the information gathered about the Root role. pydict_fileinfos[filename] = { 'version': version, 'length': length, 'hashes': hashes} pydict_signed['meta'] = pydict_fileinfos return pydict_signed
5,351,339
def extract_values(obj: Dict[str, Any], key: str, val: Any) -> List[Dict[str, Any]]:
    """
    Pull all values of specified key from nested JSON.

    Args:
        obj (dict): Dictionary to be searched
        key (str): key to search for
        val (any): value to match, which can be any type

    Returns:
        list of matched key-value pairs
    """
    return [elem for elem in extract(obj, key, val)]
5,351,340
def NameSignals(locals):
    """Search locals and name any signal by its key.

    N.B. This is intended to be called by client code to name signals in the
    local scope of a function during module elaboration.
    """
    for name in locals:
        if issubclass(type(locals[name]), SignalFrontend):
            locals[name].signal.meta.name = name
        if type(locals[name]) is InstanceOperator:
            locals[name].name = name
5,351,341
def createMeshPatches(ax, mesh, rasterized=False, verbose=True):
    """Utility function to create 2d mesh patches within a given ax."""
    if not mesh:
        pg.error("drawMeshBoundaries(ax, mesh): invalid mesh:", mesh)
        return

    if mesh.nodeCount() < 2:
        pg.error("drawMeshBoundaries(ax, mesh): too few nodes:", mesh)
        return

    pg.tic()
    polys = [_createCellPolygon(c) for c in mesh.cells()]
    patches = mpl.collections.PolyCollection(polys, picker=True,
                                             rasterized=rasterized)

    if verbose:
        pg.info("Creation of mesh patches took = ", pg.toc())

    return patches
5,351,342
def get_url_name(url_):
    """Get the name from url_."""
    raw_res = url_.split('/', -1)[-1]
    raw_res = raw_res.split('.', 1)[0]
    res = raw_res[-15:]
    return res
5,351,343
def check_uuid_in_db(uuid_to_validate, uuid_type):
    """
    A helper function to validate whether a UUID exists within our db.
    """
    uuid_in_db = None

    if uuid_type.name == "SESSION":
        uuid_in_db = Sessions.query.filter_by(session_uuid=uuid_to_validate).first()
    elif uuid_type.name == "QUIZ":
        uuid_in_db = Scores.query.filter_by(quiz_uuid=uuid_to_validate).first()
    elif uuid_type.name == "USER":
        uuid_in_db = Users.query.filter_by(user_uuid=uuid_to_validate).first()

    if not uuid_in_db:
        raise DatabaseError(message=f"{uuid_type.name}_UUID is not in the db.")

    return uuid_in_db
5,351,344
def main(database, symbol, mode): """ :param database: :param symbol: :param mode: :return: """ symbol = symbol.upper() data = { 'financial_performance': { 'balance_sheets': { 'yearly': [ ]}, 'cash_flows': { 'yearly': [ ]}, 'income_statements': { 'yearly': [ ]}}, 'symbol': symbol} codes = [ 'INTANGIBLES', 'INVENTORY', 'ASSETS', 'ASSETSC', 'LIABILITIESC', 'LIABILITIES', 'EQUITY', 'CAPEX', 'DEPAMOR', 'NCFX', 'NCF', 'NCFF', 'NETINC', 'NCFI', 'NCFO', 'GP', 'TAXEXP', 'INTEXP', 'NETINCCMN', 'RND', 'SGNA', 'REVENUE', 'EBIT', 'RETEARN', 'PAYABLES', ] needed_quandl_codes = {} for code in codes: needed_quandl_codes[code] = '{d}/{s}_{c}_{m}'.format(d=database, s=symbol, c=code, m=mode) balance_sheet = { 'intengibles': needed_quandl_codes['INTANGIBLES'], 'inventory': needed_quandl_codes['INVENTORY'], 'total_current_assets': needed_quandl_codes['ASSETSC'], 'total_assets': needed_quandl_codes['ASSETS'], 'payables': needed_quandl_codes['PAYABLES'], 'total_current_liabilities': needed_quandl_codes['LIABILITIESC'], 'total_liabilities': needed_quandl_codes['LIABILITIES'], 'retained_earnings': needed_quandl_codes['RETEARN'], 'total_stockholder_equity': needed_quandl_codes['EQUITY'], 'total_liabilities_and_equity': needed_quandl_codes['LIABILITIES']+needed_quandl_codes['EQUITY'], 'year': 2016 } cash_flow = { 'capital_expenditures':needed_quandl_codes['CAPEX'], 'date_released': '2016-01-01', 'depreciation': needed_quandl_codes['DEPAMOR'], 'exchange_rate_effect': needed_quandl_codes['NCFX'], 'net_cash_flow': needed_quandl_codes['NCF'], 'net_income': needed_quandl_codes['NETINC'], 'net_investing_cash_flow': needed_quandl_codes['NCFI'], 'net_operating_cash_flow': needed_quandl_codes['NCFO'], 'net_financing_cash_flow': needed_quandl_codes['NCFF'], 'year': 2016} income_statement = { 'date_released': '2016-01-01', 'ebit': needed_quandl_codes['EBIT'], 'gross_profit': needed_quandl_codes['GP'], 'income_tax': needed_quandl_codes['TAXEXP'], 'interest_expense': needed_quandl_codes['INTEXP'], 'net_income': needed_quandl_codes['NETINC'], 'net_income_to_common_shares': needed_quandl_codes['NETINCCMN'], 'research_and_development': needed_quandl_codes['RND'], 'sales_general_and_admin': needed_quandl_codes['SGNA'], 'total_revenue': needed_quandl_codes['REVENUE'], 'year': 2016} values_from_quandl_file = os.path.join(os.path.dirname(__file__), '..', 'data', '{symbol}_quandl.json'.format( symbol=symbol)) # Lets check if the symbol data file exists otherwise call the quandl api if not os.path.isfile(values_from_quandl_file): # print(list(needed_quandl_codes.values())) response = quandl.get(list(needed_quandl_codes.values())).to_dict() quandl_data = {} for key, value in response.items(): timeseries = {} for timestamp, numeric_value in value.items(): time_key = '{}-{}-{}'.format(timestamp.day, timestamp.month, timestamp.year) timeseries[time_key] = numeric_value quandl_data[key] = timeseries with open(values_from_quandl_file, 'w') as file_: file_.write(json.dumps(quandl_data)) else: with open(values_from_quandl_file, 'r') as file_: quandl_data = json.loads(file_.read()) balance_sheets = [] income_statements = [] cash_flows = [] key, value = quandl_data.popitem() years = [v for v in value.keys()] quandl_data = flatten(quandl_data) for year in years: parsed_balance_sheet = {} parsed_income_statement = {} parsed_cash_flow = {} parsed_balance_sheet['year'] = year.split('-')[-1] parsed_income_statement['year'] = year.split('-')[-1] parsed_cash_flow['year'] = year.split('-')[-1] for element, value in balance_sheet.items(): if element == 
'date_released': parsed_balance_sheet['date_released'] = year quandl_code = '{value}.{date_released}'.format(value=value, date_released=year) if quandl_code in quandl_data.keys(): parsed_balance_sheet[element] = quandl_data[quandl_code] else: if element != 'year' and element != 'date_released': parsed_balance_sheet[element] = 0.0 balance_sheets.append(parsed_balance_sheet) for element, value in income_statement.items(): if element == 'date_released': parsed_income_statement['date_released'] = year quandl_code = '{value}.{date_released}'.format(value=value, date_released=year) if quandl_code in quandl_data.keys(): parsed_income_statement[element] = quandl_data[quandl_code] else: if element != 'year' and element != 'date_released': parsed_income_statement[element] = 0.0 income_statements.append(parsed_income_statement) for element, value in cash_flow.items(): if element == 'date_released': parsed_cash_flow['date_released'] = year quandl_code = '{value}.{date_released}'.format(value=value, date_released=year) if quandl_code in quandl_data.keys(): parsed_cash_flow[element] = quandl_data[quandl_code] else: if element != 'year' and element != 'date_released': parsed_cash_flow[element] = 0.0 cash_flows.append(parsed_cash_flow) data['financial_performance']['balance_sheets']['yearly'] = balance_sheets data['financial_performance']['income_statements'][ 'yearly'] = income_statements data['financial_performance']['cash_flows']['yearly'] = cash_flows path = os.path.join(os.path.dirname(__file__), '..', 'data', '{symbol}.json'.format(symbol=symbol)) with open(path, 'w') as file_: file_.write(json.dumps(data))
5,351,345
def delete(uuid): """ Deletes stored entities and time them. Args: uuid: A str, unique identifier, a part of the keynames of entities. Returns: A tuple of two lists. A list of float times to delete all entities, and a list of errors. A zero value signifies a failure. """ timings = [] errors = [] for index in range(0, constants.NUM_SAMPLES): entity = None try: entity = TestModel.get_by_key_name(key_names=uuid + str(index)) if not entity: raise Exception("Unable to first fetch entity.") except Exception, exception: logging.exception(exception) errors.append(str(exception)) total_time = 0 timings.append(total_time) logging.error("Left over entity with keyname {0}".\ format(uuid + str(index))) continue start = time.time() try: entity.delete() total_time = time.time() - start except Exception, exception: logging.exception(exception) errors.append(str(exception)) total_time = 0 timings.append(total_time * constants.SECONDS_TO_MILLI) return (timings, errors)
5,351,346
def remove_prefix(string, prefix):
    """
    This function removes the given prefix from a string, if the string
    does indeed begin with the prefix; otherwise, it returns the string
    unmodified.
    """
    if string.startswith(prefix):
        return string[len(prefix):]
    else:
        return string
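A quick usage sketch (not part of the original snippet):

print(remove_prefix("feature/login-page", "feature/"))  # -> 'login-page'
print(remove_prefix("main", "feature/"))                # -> 'main' (no prefix, unchanged)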
5,351,347
def action_probs_to_action(probs):
    """
    Takes output of controller and converts to action in format [0,0,0,0]
    """
    forward = probs[:, 0:2]
    camera = probs[:, 2:5]
    jump = probs[:, 5:7]
    action = [torch.distributions.Categorical(p).sample().detach().item()
              for p in [forward, camera, jump]]
    action.append(0)  # not allowing any motion along side dimension
    return action
5,351,348
def solve_circuit(netlist): """ Generate and solve the Modified Nodal Analysis (MNA) equations for the circuit. The MNA equations are a linear system Ax = z. See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html Args: netlist (pandas.DataFrame): A netlist of circuit elements with format desc, node1, node2, value. Returns: (np.ndarray, np.ndarray): - V_node: Voltages of the voltage elements - I_batt: Currents of the current elements """ timer = pybamm.Timer() desc = np.array(netlist["desc"]).astype("<U16") node1 = np.array(netlist["node1"]) node2 = np.array(netlist["node2"]) value = np.array(netlist["value"]) nLines = netlist.shape[0] n = np.concatenate((node1, node2)).max() # Number of nodes (highest node number) m = 0 # "m" is the number of voltage sources, determined below. V_elem = ["V", "O", "E", "H"] for nm in desc: if nm[0] in V_elem: m += 1 # Construct the A matrix, which will be a (n+m) x (n+m) matrix # A = [G B] # [B.T D] # G matrix tracks the conductance between nodes (consists of floats) # B matrix tracks voltage sources between nodes (consists of -1, 0, 1) # D matrix is always zero for non-dependent sources # Construct the z vector with length (n+m) # z = [i] # [e] # i is currents and e is voltages # Use lil matrices to construct the A array G = sp.sparse.lil_matrix((n, n)) B = sp.sparse.lil_matrix((n, m)) D = sp.sparse.lil_matrix((m, m)) i = np.zeros([n, 1]) e = np.zeros([m, 1]) """ % We need to keep track of the number of voltage sources we've parsed % so far as we go through file. We start with zero. """ vsCnt = 0 """ % This loop does the bulk of filling in the arrays. It scans line by line % and fills in the arrays depending on the type of element found on the % current line. % See http://lpsa.swarthmore.edu/Systems/Electrical/mna/MNA3.html """ for k1 in range(nLines): n1 = node1[k1] - 1 # get the two node numbers in python index format n2 = node2[k1] - 1 elem = desc[k1][0] if elem == "R": # Resistance elements: fill the G matrix only g = 1 / value[k1] # conductance = 1 / R """ % Here we fill in G array by adding conductance. % The procedure is slightly different if one of the nodes is % ground, so check for those accordingly. 
""" if n1 == -1: # -1 is the ground node G[n2, n2] = G[n2, n2] + g elif n2 == -1: G[n1, n1] = G[n1, n1] + g else: G[n1, n1] = G[n1, n1] + g G[n2, n2] = G[n2, n2] + g G[n1, n2] = G[n1, n2] - g G[n2, n1] = G[n2, n1] - g elif elem == "V": # Voltage elements: fill the B matrix and the e vector if n1 >= 0: B[n1, vsCnt] = B[n1, vsCnt] + 1 if n2 >= 0: B[n2, vsCnt] = B[n2, vsCnt] - 1 e[vsCnt] = value[k1] vsCnt += 1 elif elem == "I": # Current elements: fill the i vector only if n1 >= 0: i[n1] = i[n1] - value[k1] if n2 >= 0: i[n2] = i[n2] + value[k1] # Construct final matrices from sub-matrices upper = sp.sparse.hstack((G, B)) lower = sp.sparse.hstack((B.T, D)) A = sp.sparse.vstack((upper, lower)) # Convert a to csr sparse format for more efficient solving of the linear system # csr works slighhtly more robustly than csc A_csr = sp.sparse.csr_matrix(A) z = np.vstack((i, e)) toc_setup = timer.time() lp.logger.debug(f"Circuit set up in {toc_setup}") # Scipy # X = solve(A, z).flatten() X = sp.sparse.linalg.spsolve(A_csr, z).flatten() # Pypardiso # X = pypardiso.spsolve(Aspr, z).flatten() # amg # ml = pyamg.smoothed_aggregation_solver(Aspr) # X = ml.solve(b=z, tol=1e-6, maxiter=10, accel="bicgstab") # include ground node (0V) # it is counter-intuitive that z is [i,e] while X is [V,I], but this is correct V_node = np.zeros(n + 1) V_node[1:] = X[:n] I_batt = X[n:] toc = timer.time() lp.logger.debug(f"Circuit solved in {toc - toc_setup}") lp.logger.info(f"Circuit set up and solved in {toc}") return V_node, I_batt
5,351,349
def search_for_subject(subject: Synset, num_urls: int, subscription_key: str, custom_config: str, host: str, path: str) -> Tuple[List[Tuple[str, str, str]], str, str]: """Perform the search phase for one particular subject.""" query = get_search_query(subject) logger.info(f"Subject {subject.name()} - Search query: `{query}`") urls: Set[str] = set() results: List[Tuple[str, str, str]] = [] wiki_links: List[str] = [] offset = 0 step = 0 while len(urls) < num_urls: search_result_json = bing_search(search_query=query, count=SEARCH_BATCH_SIZE, offset=offset, subscription_key=subscription_key, custom_config=custom_config, host=host, path=path) try: for url, title, snippet in parse_content_from_search_result(search_result_json): if url not in urls: urls.add(url) results.append((url, title, snippet)) if url.startswith(EN_WIKIPEDIA_PREFIX): wiki_links.append(url) if len(urls) >= num_urls: break except Exception: break offset += SEARCH_BATCH_SIZE step += 1 if step >= MAX_SEARCH_STEP: break if subject.name() in MANUAL_WN2WP: logger.info("Detected manual WordNet-Wikipedia linking") wiki = EN_WIKIPEDIA_PREFIX + quote_plus(MANUAL_WN2WP[subject.name()]["wikipedia"]).capitalize() wiki_map_source = MANUAL_WN2WP[subject.name()]["source"] else: if len(wiki_links) == 0: wiki_links = search_wiki(subject, subscription_key, custom_config, host, path) wiki = wiki_links[0] for w in wiki_links: w = unquote_plus(w) if "List_" in w: continue if "(disambiguation)" in w: continue if "Category:" in w: continue if "Template:" in w: continue wiki = w break wiki_map_source = "BING" # Add Wikipedia article if wiki.lower() not in set(url.lower() for url in urls): results[-1] = (wiki, "{} - Wikipedia".format(wiki[(wiki.rindex("/") + 1):]).capitalize(), "") return results, wiki, wiki_map_source
5,351,350
def get_kwargs(class_name: str) -> Kwargs: """Returns the specific kwargs for each field `class_name`""" default_kwargs = get_default_kwargs() class_kwargs = get_setting("COMMON_KWARGS", {}) use_kwargs = class_kwargs.get(class_name, default_kwargs) return use_kwargs
5,351,351
def minute_info(x):
    """
    Extract the minute component from a timestamp and return it as a
    fraction of an hour (minute / 60).
    """
    n2 = x.minute
    return n2 / 60
5,351,352
def remove_html_tags(text): """Removes HTML Tags from texts and replaces special spaces with regular spaces""" text = BeautifulSoup(text, 'html.parser').get_text() text = text.replace(u'\xa0', ' ') return text
5,351,353
def patchy(target, source=None): """ If source is not supplied, auto updates cannot be applied """ if isinstance(target, str): target = resolve(target) if isinstance(source, str): source = resolve(source) if isinstance(target, ModuleType): return PatchModule(target, source) elif isinstance(target, type) and source: return PatchClass(target, source)
5,351,354
def tests_fastapi(session: nox.sessions.Session, fastapi): """ Test against a specific FastAPI version """ tests(session, overrides={'fastapi': fastapi})
5,351,355
def _handle_special_addresses(lion):
    """
    When there are special address codes/names, ensure that there is a duplicate
    row with the special name and code as the primary.

    Note: Applies to special address types 'P' (addressable place names), 'B', and 'G'.
    """
    special = lion[
        (lion['special_address_type'].isin(['P', 'B', 'G'])) &
        (lion['street'] != lion['special_address_street_name'])
    ].drop(columns=['street', 'street_code'])

    special['street'] = special['special_address_street_name']
    special['street_code'] = special['special_address_street_code']
    special['special_address_street_code'] = ""
    special['special_address_street_name'] = ""

    lion = pd.concat([lion, special], sort=True).reset_index(drop=True)
    return lion
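# A small made-up illustration of the duplication behaviour above, assuming
# pandas is imported as pd and _handle_special_addresses is defined as shown:
# a single 'P' row whose special name differs from the primary street yields
# a second row with the special name promoted to the primary street.
_demo = pd.DataFrame({
    "street": ["BROADWAY"],
    "street_code": ["1001"],
    "special_address_type": ["P"],
    "special_address_street_name": ["CITY HALL"],
    "special_address_street_code": ["9001"],
})
print(_handle_special_addresses(_demo)["street"].tolist())
# ['BROADWAY', 'CITY HALL']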
5,351,356
def get_local(): """Construct a local population.""" pop = CosmicPopulation.simple(SIZE, generate=True) survey = Survey('perfect') surv_pop = SurveyPopulation(pop, survey) return surv_pop.frbs.s_peak
5,351,357
def get_session():
    """
    Obtain a scoped (thread-local) SQLAlchemy session.
    """
    return scoped_session(
        sessionmaker(bind=get_engine(), autoflush=False))
5,351,358
def xform(q=1,a=1,bb=1,bbi=1,cp=1,cpc=1,dph=1,eu=1,m="[float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]",os=1,piv="[linear, linear, linear]",p=1,puv=1,rfl=1,rab=1,rao=1,rax=1,ray=1,raz=1,rft="float",r=1,ra="[angle, angle, angle]",roo="string",rp="[linear, linear, linear]",rt="[linear, linear, linear]",ro="[angle, angle, angle]",s="[float, float, float]",sp="[linear, linear, linear]",st="[linear, linear, linear]",sh="[float, float, float]",t="[linear, linear, linear]",ws=1,wd=1,ztp=1): """ http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/xform.html ----------------------------------------- xform is undoable, queryable, and NOT editable. This command can be used query/set any element in a transformation node. It can also be used to query some values that cannot be set directly such as the transformation matrix or the bounding box. It can also set both pivot points to convenient values. All values are specified in transformation coordinates. (attribute-space) In addition, the attributes are applied/returned in the order in which they appear in the flags section. (which corresponds to the order they appear in the transformation matrix as given below) See also: move, rotate, scale ## Notes The transformation matrix for a node is built by post-multiplying the following matrices in the given order (Note: rotations are applied according to the rotation order parameter and the 6 different rotation possibilities are not shown below) -1 -1 [M] = [sp]x[s]x[sh]x[sp]x[st]x[rp]x[ar]x[ro]x[rp]x[rt]x[t] where: [sp] = | 1 0 0 0 | = scale pivot matrix | 0 1 0 0 | | 0 0 1 0 | | -spx -spy -spz 1 | [s] = | sx 0 0 0 | = scale matrix | 0 sy 0 0 | | 0 0 sz 0 | | 0 0 0 1 | [sh] = | 1 0 0 0 | = shear matrix | xy 1 0 0 | | xz yz 1 0 | | 0 0 0 1 | -1 [sp] = | 1 0 0 0 | = scale pivot inverse matrix | 0 1 0 0 | | 0 0 1 0 | | spx spy spz 1 | [st] = | 1 0 0 0 | = scale translate matrix | 0 1 0 0 | | 0 0 1 0 | | stx sty stz 1 | [rp] = | 1 0 0 0 | = rotate pivot matrix | 0 1 0 0 | | 0 0 1 0 | | -rpx -rpy -rpz 1 | [ar] = | * * * 0 | = axis rotation matrix | * * * 0 | (composite rotation, | * * * 0 | see [rx], [ry], [rz] | 0 0 0 1 | below for details) [rx] = | 1 0 0 0 | = rotate X matrix | 0 cos(x) sin(x) 0 | | 0 -sin(x) cos(x) 0 | | 0 0 0 1 | [ry] = | cos(y) 0 -sin(y) 0 | = rotate Y matrix | 0 1 0 0 | | sin(y) 0 cos(y) 0 | | 0 0 0 1 | [rz] = | cos(z) sin(z) 0 0 | = rotate Z matrix | -sin(z) cos(z) 0 0 | | 0 0 1 0 | | 0 0 0 1 | -1 [rp] = | 1 0 0 0 | = rotate pivot matrix | 0 1 0 0 | | 0 0 1 0 | | rpx rpy rpz 1 | [rt] = | 1 0 0 0 | = rotate translate matrix | 0 1 0 0 | | 0 0 1 0 | | rtx rty rtz 1 | [t] = | 1 0 0 0 | = translation matrix | 0 1 0 0 | | 0 0 1 0 | | tx ty tz 1 | ----------------------------------------- Return Value: None In query mode, return type is based on queried flag. ----------------------------------------- Flags: ----------------------------------------- a : absolute [boolean] [] perform absolute transformation (default) ----------------------------------------- bb : boundingBox [boolean] ['query'] Returns the bounding box of an object. The values returned are in the following order: xmin ymin zmin xmax ymax zmax. ----------------------------------------- bbi : boundingBoxInvisible [boolean] ['query'] Returns the bounding box of an object. This includes the bounding boxes of all invisible children which are not included using the boundingBox flag. 
The values returned are in following order: xmin ymin zmin xmax ymax zmax. ----------------------------------------- cp : centerPivots [boolean] [] Set pivot points to the center of the object's bounding box. (see -p flag) ----------------------------------------- cpc : centerPivotsOnComponents [boolean] [] Set pivot points to the center of the component's bounding box. (see -p flag) ----------------------------------------- dph : deletePriorHistory [boolean] [] If true then delete the construction history before the operation is performed. ----------------------------------------- eu : euler [boolean] [] modifer for -relative flag that specifies rotation values should be added to current XYZ rotation values. ----------------------------------------- m : matrix [[float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float]] ['query'] Sets/returns the composite transformation matrix. *Note* the matrix is represented by 16 double arguments that are specified in row order. ----------------------------------------- os : objectSpace [boolean] ['query'] treat values as object-space transformation values (only works for pivots, translations, rotation, rotation axis, matrix, and bounding box flags) ----------------------------------------- piv : pivots [[linear, linear, linear]] ['query'] convenience method that changes both the rotate and scale pivots simultaneously. (see -rp -sp flags for more info) ----------------------------------------- p : preserve [boolean] [] preserve overall transformation. used to prevent object from "jumping" when changing pivots or rotation order. the default value is true. (used with -sp, -rp, -roo, -cp, -ra) ----------------------------------------- puv : preserveUV [boolean] [] When true, UV values on rotated components are projected across the rotation in 3d space. For small edits, this will freeze the world space texture mapping on the object. When false, the UV values will not change for a selected vertices. Default is false. ----------------------------------------- rfl : reflection [boolean] [] To move the corresponding symmetric components also. 
----------------------------------------- rab : reflectionAboutBBox [boolean] [] Sets the position of the reflection axis at the geometry bounding box ----------------------------------------- rao : reflectionAboutOrigin [boolean] [] Sets the position of the reflection axis at the origin ----------------------------------------- rax : reflectionAboutX [boolean] [] Specifies the X=0 as reflection plane ----------------------------------------- ray : reflectionAboutY [boolean] [] Specifies the Y=0 as reflection plane ----------------------------------------- raz : reflectionAboutZ [boolean] [] Specifies the Z=0 as reflection plane ----------------------------------------- rft : reflectionTolerance [float] [] Specifies the tolerance to findout the corresponding reflected components ----------------------------------------- r : relative [boolean] [] perform relative transformation ----------------------------------------- ra : rotateAxis [[angle, angle, angle]] ['query'] rotation axis orientation (when used with the -p flag the overall rotation is preserved by modifying the rotation to compensate for the axis rotation) ----------------------------------------- roo : rotateOrder [string] ['query'] rotation order (when used with the -p flag the overall rotation is preserved by modifying the local rotation to be quivalent to the old one) Valid values for this flag are <xyz | yzx | zxy | xzy | yxz | zyx> ----------------------------------------- rp : rotatePivot [[linear, linear, linear]] ['query'] rotate pivot point transformation (when used with the -p flag the overall transformation is preserved by modifying the rotation translation) ----------------------------------------- rt : rotateTranslation [[linear, linear, linear]] ['query'] rotation translation ----------------------------------------- ro : rotation [[angle, angle, angle]] ['query'] rotation transformation ----------------------------------------- s : scale [[float, float, float]] ['query'] scale transformation ----------------------------------------- sp : scalePivot [[linear, linear, linear]] ['query'] scale pivot point transformation (when used with the -p flag the overall transformation is preserved by modifying the scale translation) ----------------------------------------- st : scaleTranslation [[linear, linear, linear]] ['query'] scale translation ----------------------------------------- sh : shear [[float, float, float]] ['query'] shear transformation. The values represent the shear <xy,xz,yz> ----------------------------------------- t : translation [[linear, linear, linear]] ['query'] translation ----------------------------------------- ws : worldSpace [boolean] ['query'] (works for pivots, translations, rotation, rotation axis, matrix, and bounding box flags). Note that, when querying the scale, that this calculation is cumulative and is only valid if there are all uniform scales and no rotation. In a hierarchy with non-uniform scale and rotation, this value may not correspond entirely with the perceived global scale. ----------------------------------------- wd : worldSpaceDistance [boolean] ['query'] Values for -sp, -rp, -st, -rt, -t, -piv flags are treated as world space distances to move along the local axis. (where the local axis depends on whether the command is operating in local-space or object-space. This flag has no effect for world space. 
----------------------------------------- ztp : zeroTransformPivots [boolean] reset pivot points and pivot translations without changing the overall matrix by applying these values into the translation channel. """
5,351,359
def mode_ratios(ratios): """Box type mode ratios.""" ratios.plot(kind='kde') plt.title('Boxes Type Mode Ratios') plt.show()
5,351,360
def calculate_y_pos(x, centre):
    """Calculates the y-coordinate on a parabolic curve, given x and the curve's centre."""
    # sun_radius is expected to be a module-level constant defined elsewhere
    y = 1 / centre * (x - centre) ** 2 + sun_radius
    return int(y)
5,351,361
def extract_flowlines(gdb_path, target_crs, extra_flowline_cols=[]): """ Extracts flowlines data from NHDPlusHR data product. Extract flowlines from NHDPlusHR data product, joins to VAA table, and filters out coastlines. Extracts joins between flowlines, and filters out coastlines. Parameters ---------- gdb_path : str path to the NHD HUC4 Geodatabase target_crs: GeoPandas CRS object target CRS to project NHD to for analysis, like length calculations. Must be a planar projection. extra_cols: list List of extra field names to extract from NHDFlowline layer Returns ------- tuple of (GeoDataFrame, DataFrame) (flowlines, joins) """ ### Read in flowline data and convert to data frame print("Reading flowlines") flowline_cols = FLOWLINE_COLS + extra_flowline_cols df = read_dataframe( gdb_path, layer="NHDFlowline", force_2d=True, columns=[flowline_cols] ) print("Read {:,} flowlines".format(len(df))) # Index on NHDPlusID for easy joins to other NHD data df.NHDPlusID = df.NHDPlusID.astype("uint64") df = df.set_index(["NHDPlusID"], drop=False) # convert MultiLineStrings to LineStrings (all have a single linestring) df.geometry = pg.get_geometry(df.geometry.values.data, 0) ### Read in VAA and convert to data frame # NOTE: not all records in Flowlines have corresponding records in VAA # we drop those that do not since we need these fields. print("Reading VAA table and joining...") vaa_df = read_dataframe(gdb_path, layer="NHDPlusFlowlineVAA", columns=[VAA_COLS]) vaa_df.NHDPlusID = vaa_df.NHDPlusID.astype("uint64") vaa_df = vaa_df.set_index(["NHDPlusID"]) df = df.join(vaa_df, how="inner") print("{:,} features after join to VAA".format(len(df))) # Simplify data types for smaller files and faster IO df.FType = df.FType.astype("uint16") df.FCode = df.FCode.astype("uint16") df.StreamOrde = df.StreamOrde.astype("uint8") df.Slope = df.Slope.astype("float32") df.MinElevSmo = df.MinElevSmo.astype("float32") df.MaxElevSmo = df.MaxElevSmo.astype("float32") ### Read in flowline joins print("Reading flowline joins") join_df = gp.read_file(gdb_path, layer="NHDPlusFlow")[ ["FromNHDPID", "ToNHDPID"] ].rename(columns={"FromNHDPID": "upstream", "ToNHDPID": "downstream"}) join_df.upstream = join_df.upstream.astype("uint64") join_df.downstream = join_df.downstream.astype("uint64") ### Label loops for easier removal later # WARNING: loops may be very problematic from a network processing standpoint. # Include with caution. print("Identifying loops") df["loop"] = (df.StreamOrde != df.StreamCalc) | (df.FlowDir.isnull()) idx = df.loc[df.loop].index join_df["loop"] = join_df.upstream.isin(idx) | join_df.downstream.isin(idx) ### Filter out coastlines and update joins # WARNING: we tried filtering out pipelines (FType == 428). It doesn't work properly; # there are many that go through dams and are thus needed to calculate # network connectivity and gain of removing a dam. 
print("Filtering out coastlines...") coastline_idx = df.loc[df.FType == 566].index df = df.loc[~df.index.isin(coastline_idx)].copy() # remove any joins that have coastlines as upstream # these are themselves coastline segments join_df = join_df.loc[~join_df.upstream.isin(coastline_idx)].copy() # set the downstream to 0 for any that join coastlines # this will enable us to mark these as downstream terminals in # the network analysis later join_df.loc[join_df.downstream.isin(coastline_idx), "downstream"] = 0 # drop any duplicates (above operation sets some joins to upstream and downstream of 0) join_df = join_df.drop_duplicates() print("{:,} features after removing coastlines".format(len(df))) ### Add calculated fields # Set our internal master IDs to the original index of the file we start from # Assume that we can always fit into a uint32, which is ~400 million records # and probably bigger than anything we could ever read in df["lineID"] = df.index.values.astype("uint32") + 1 join_df = ( join_df.join(df.lineID.rename("upstream_id"), on="upstream") .join(df.lineID.rename("downstream_id"), on="downstream") .fillna(0) ) for col in ("upstream", "downstream"): join_df[col] = join_df[col].astype("uint64") for col in ("upstream_id", "downstream_id"): join_df[col] = join_df[col].astype("uint32") ### Calculate size classes print("Calculating size class") drainage = df.TotDASqKm df.loc[drainage < 10, "sizeclass"] = "1a" df.loc[(drainage >= 10) & (drainage < 100), "sizeclass"] = "1b" df.loc[(drainage >= 100) & (drainage < 518), "sizeclass"] = "2" df.loc[(drainage >= 518) & (drainage < 2590), "sizeclass"] = "3a" df.loc[(drainage >= 2590) & (drainage < 10000), "sizeclass"] = "3b" df.loc[(drainage >= 10000) & (drainage < 25000), "sizeclass"] = "4" df.loc[drainage >= 25000, "sizeclass"] = "5" print("projecting to target projection") df = df.to_crs(target_crs) # Calculate length and sinuosity print("Calculating length and sinuosity") df["length"] = df.geometry.length.astype("float32") df["sinuosity"] = df.geometry.apply(calculate_sinuosity).astype("float32") # set join types to make it easier to track join_df["type"] = "internal" # set default join_df.loc[join_df.upstream == 0, "type"] = "origin" join_df.loc[join_df.downstream == 0, "type"] = "terminal" join_df.loc[(join_df.upstream != 0) & (join_df.upstream_id == 0), "type"] = "huc_in" # drop columns not useful for later processing steps df = df.drop(columns=["FlowDir", "StreamCalc"]) return df, join_df
5,351,362
def exists(awesome_title): """Check the awesome repository is cached Args: awesome_title: Awesome repository title Returns: True if exists, False otherwise """ awesome_cache_directory = os.path.join(CACHE_DIRECTORY, awesome_title) awesome_cached_readme = os.path.join(awesome_cache_directory, 'README.md') return os.path.exists(awesome_cached_readme)
5,351,363
def split_str_to_list(input_str, split_char=","): """Split a string into a list of elements. Args: input_str (str): The string to split split_char (str, optional): The character to split the string by. Defaults to ",". Returns: (list): The string split into a list """ # Split a string into a list using `,` char split_str = input_str.split(split_char) # For each element in split_str, strip leading/trailing whitespace for i, element in enumerate(split_str): split_str[i] = element.strip() return split_str
5,351,364
def generate_password(length): """ This will create a random password for the user Args: length - the user's preferred length for the password Return: It will return a random password of user's preferred length """ return Password.generate_pass(length)
5,351,365
def test_missing_input_type(): """Test that text input type is returned if field data don't have 'type' key. 1. Create a field parser for a dictionary without input type. 2. Parse input type. 3. Check that text input type is returned. """ actual_input_type = FieldParser(data={}).parse_input_type() assert actual_input_type == InputType.TEXT, "Wrong input type"
5,351,366
def export_csv(obj, file_name, point_type='evalpts', **kwargs): """ Exports control points or evaluated points as a CSV file. :param obj: a curve or a surface object :type obj: abstract.Curve, abstract.Surface :param file_name: output file name :type file_name: str :param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points :type point_type: str :raises IOError: an error occurred writing the file """ if not isinstance(obj, (abstract.Curve, abstract.Surface)): raise ValueError("Input object should be a curve or a surface") # Pick correct points from the object if point_type == 'ctrlpts': points = obj.ctrlpts elif point_type == 'evalpts' or point_type == 'curvepts' or point_type == 'surfpts': points = obj.evalpts else: raise ValueError("Please choose a valid point type option. Possible types: ctrlpts, evalpts") # Prepare CSV header dim = len(points[0]) line = "dim " for i in range(dim-1): line += str(i + 1) + ", dim " line += str(dim) + "\n" # Prepare values for pt in points: line += ",".join([str(p) for p in pt]) + "\n" # Write to file return exch.write_file(file_name, line)
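# A standalone sketch of the CSV layout produced by export_csv for three
# made-up 3-D points, with exch.write_file replaced by a plain print so the
# snippet has no geomdl dependency.
points = [(0.0, 0.0, 0.0), (1.0, 0.5, 0.2), (2.0, 1.0, 0.4)]
dim = len(points[0])
text = ", ".join("dim {}".format(i + 1) for i in range(dim)) + "\n"
text += "".join(",".join(str(p) for p in pt) + "\n" for pt in points)
print(text)
# dim 1, dim 2, dim 3
# 0.0,0.0,0.0
# 1.0,0.5,0.2
# 2.0,1.0,0.4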
5,351,367
async def test_error(hass): """Test entity is created.""" system = get_system() system.errors = [ Error("device_name", "title", "F152", "description", datetime.now()) ] assert await setup_vaillant(hass, system=system) assert "binary_sensor.vaillant_error_f152" in hass.states.async_entity_ids()
5,351,368
def formatRFC822Headers(headers): """ Convert the key-value pairs in 'headers' to valid RFC822-style headers, including adding leading whitespace to elements which contain newlines in order to preserve continuation-line semantics. """ munged = [] linesplit = re.compile(r'[\n\r]+?') for key, value in headers: vallines = linesplit.split(value) while vallines: if vallines[-1].rstrip() == '': vallines = vallines[:-1] else: break munged.append('%s: %s' % (key, '\r\n '.join(vallines))) return '\r\n'.join(munged)
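# A quick usage sketch, assuming formatRFC822Headers is defined as above:
# a value containing a newline becomes a continuation line prefixed with a
# single space, and headers are joined with CRLF.
print(formatRFC822Headers([("Subject", "Hello\nWorld"), ("From", "a@b.c")]))
# Subject: Hello
#  World
# From: a@b.c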
5,351,369
def first_fixation_duration(trial: Trial, region_number: int) -> RegionMeasure: """ The duration of the first fixation in a region during first pass reading (i.e., before the reader fixates areas beyond the region). If this region is skipped during first pass, this measure is None. :: fp_fixations = get_first_pass_fixations(trial, region_number) if length of fp_fixations is 0: return None else: return duration of first fixation in fp_fixations """ region = region_exists(trial, region_number) fp_fixations = get_fp_fixations(trial, region_number) if not fp_fixations: return save_measure(trial, region, "first_fixation_duration", None, None) return save_measure( trial, region, "first_fixation_duration", fp_fixations[0].duration(), [fp_fixations[0]], )
5,351,370
def find_in_path(input_data, path): """Finds values at the path in input_data. :param input_data: dict or list :param path: the path of the values example: b.*.name :result: list of found data """ result = find(input_data, path.split('.')) return [value for _, value in result if value]
5,351,371
def collect_static_files(site_name=None): """ Collects django static files to where nginx can find them """ if not site_name: site_name = env.host source_folder = get_source_folder(env.user, site_name) with prefix('source {}/.env/bin/activate'.format(source_folder)): run('cd {} && python manage.py collectstatic --noinput'.format( source_folder))
5,351,372
def post_test_check(duthost, up_bgp_neighbors):
    """Post-checks the status of critical processes and state of BGP sessions.

    Args:
      duthost: Host DUT.
      up_bgp_neighbors: A list of BGP neighbors expected to be established.

    Return:
      True if all critical processes are running and all BGP sessions are
      established; otherwise False.
    """
    return check_all_critical_processes_status(duthost) and duthost.check_bgp_session_state(up_bgp_neighbors, "established")
5,351,373
def batch_iter(data, batch_size):
    """Yield shuffled mini-batches of the (src, dst, eid) edge arrays."""
    src, dst, eid = data
    perm = np.arange(len(eid))
    np.random.shuffle(perm)
    start = 0
    while start < len(src):
        index = perm[start:start + batch_size]
        start += batch_size
        yield src[index], dst[index], eid[index]
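# A small usage sketch with made-up edge arrays, assuming NumPy is imported
# as np (as batch_iter itself requires): five edges iterated in shuffled
# batches of two, with the final batch holding the remainder.
_src = np.arange(5)
_dst = np.arange(5) + 10
_eid = np.arange(5)
for s, d, e in batch_iter((_src, _dst, _eid), batch_size=2):
    print(s.shape, d.shape, e.shape)  # (2,) (2,) (2,) twice, then (1,) (1,) (1,)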
5,351,374
def fill_with_mode(filename, column): """ Fill the missing values(NaN) in a column with the mode of that column Args: filename: Name of the CSV file. column: Name of the column to fill Returns: df: Pandas DataFrame object. (Representing entire data and where 'column' does not contain NaN values) (Filled with above mentioned rules) """ df=pd.read_csv(filename) mode = df[column].mode() df[column] = df[column].fillna(mode[0]) return df
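# A quick sketch using an in-memory CSV (pandas.read_csv also accepts
# file-like objects), so the fill-with-mode behaviour can be checked without
# a file on disk.
from io import StringIO

_csv = StringIO("city,temp\nA,10\nB,\nC,10\nD,12\n")
print(fill_with_mode(_csv, "temp")["temp"].tolist())
# [10.0, 10.0, 10.0, 12.0]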
5,351,375
def successorrevs(unfi, rev): """yield revision numbers for successors of rev""" assert unfi.filtername is None get_rev = unfi.changelog.index.get_rev for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]): r = get_rev(s) if r is not None: yield r
5,351,376
def add_workshift_context(request): """ Add workshift variables to all dictionaries passed to templates. """ if not request.user.is_authenticated(): return {} if Semester.objects.count() < 1: return {"WORKSHIFT_ENABLED": False} # Current semester is for navbar notifications try: current_semester = Semester.objects.get(current=True) except Semester.DoesNotExist: current_semester = None except Semester.MultipleObjectsReturned: current_semester = Semester.objects.filter(current=True).latest("start_date") workshift_emails = [] for pos in Manager.objects.filter(workshift_manager=True, active=True): if pos.email: workshift_emails.append(pos.email) elif pos.incumbent.email_visible and pos.incumbent.user.email: workshift_emails.append(pos.incumbent.user.email) if workshift_emails: workshift_email_str = " ({0})".format( ", ".join(["<a href=\"mailto:{0}\">{0}</a>".format(i) for i in workshift_emails]) ) else: workshift_email_str = "" messages.add_message( request, messages.WARNING, MESSAGES["MULTIPLE_CURRENT_SEMESTERS"].format( admin_email=settings.ADMINS[0][1], workshift_emails=workshift_email_str, )) today = localtime(now()).date() days_passed = None total_days = None semester_percentage = None standing = None happening_now = None workshift_profile = None if current_semester: # number of days passed in this semester days_passed = (today - current_semester.start_date).days # total number of days in this semester total_days = (current_semester.end_date - current_semester.start_date).days semester_percentage = round((days_passed / total_days) * 100, 2) # Semester is for populating the current page try: semester = request.semester except AttributeError: semester = current_semester try: workshift_profile = WorkshiftProfile.objects.get( semester=semester, user=request.user, ) except WorkshiftProfile.DoesNotExist: workshift_profile = None workshift_manager = utils.can_manage(request.user, semester=semester) upcoming_shifts = WorkshiftInstance.objects.filter( workshifter=workshift_profile, closed=False, date__gte=today, date__lte=today + timedelta(days=2), ) # TODO: Add a fudge factor of an hour to this? time = localtime(now()).time() happening_now = [] for shift in upcoming_shifts: if shift.week_long: happening_now.append(shift) continue if shift.date != today: continue if shift.start_time is None: if shift.end_time is not None: if time < shift.end_time: happening_now.append(shift) else: happening_now.append(shift) continue if shift.end_time is None: if shift.start_time is not None: if time > shift.start_time: happening_now.append(shift) else: happening_now.append(shift) continue if time > shift.start_time and time < shift.end_time: happening_now.append(shift) if workshift_profile: try: standing = workshift_profile.pool_hours.get(pool__is_primary=True).standing except (PoolHours.DoesNotExist, PoolHours.MultipleObjectsReturned): pass return { "WORKSHIFT_ENABLED": True, "SEMESTER": semester, "CURRENT_SEMESTER": current_semester, "WORKSHIFT_MANAGER": workshift_manager, "WORKSHIFT_PROFILE": workshift_profile, "STANDING": standing, "DAYS_PASSED": days_passed, "TOTAL_DAYS": total_days, "SEMESTER_PERCENTAGE": semester_percentage, "UPCOMING_SHIFTS": zip(upcoming_shifts, happening_now), }
5,351,377
def get_model_spec( model_zoo, model_def, model_params, dataset_fn, loss, optimizer, eval_metrics_fn, prediction_outputs_processor, ): """Get the model spec items in a tuple. The model spec tuple contains the following items in order: * The model object instantiated with parameters specified in `model_params`, * The `dataset_fn`, * The `loss`, * The `optimizer`, * The `eval_metrics_fn`, * The `prediction_outputs_processor`. Note that it will print warning if it's not inherited from `BasePredictionOutputsProcessor`. """ model_def_module_file = get_module_file_path(model_zoo, model_def) default_module = load_module(model_def_module_file).__dict__ model = load_model_from_module(model_def, default_module, model_params) prediction_outputs_processor = _get_spec_value( prediction_outputs_processor, model_zoo, default_module ) if prediction_outputs_processor and not isinstance( prediction_outputs_processor, BasePredictionOutputsProcessor ): logger.warning( "prediction_outputs_processor is not " "inherited from BasePredictionOutputsProcessor. " "Prediction outputs may not be processed correctly." ) return ( model, _get_spec_value(dataset_fn, model_zoo, default_module, required=True), _get_spec_value(loss, model_zoo, default_module, required=True), _get_spec_value(optimizer, model_zoo, default_module, required=True), _get_spec_value( eval_metrics_fn, model_zoo, default_module, required=True ), prediction_outputs_processor, )
5,351,378
def clean_meta(unclean_list):
    """
    Clean raw VCF meta/header lines ("##key=value" or "##key=<...>") for
    downstream processing.

    :param unclean_list: list of raw "##" header lines
    :return: list of [key, value] pairs with the leading "##" and any trailing
        ">" stripped
    """
    clean_list = []
    for i in unclean_list:
        if "=<" in i:
            i = i.rstrip(">")
            i = i.replace("##", "")
            ii = i.split("=<", 1)
        else:
            i = i.replace("##", "")
            ii = i.split("=", 1)
        clean_list.append(ii)
    return clean_list
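# A small usage sketch on two typical VCF meta lines, assuming clean_meta is
# defined as above.
print(clean_meta(["##fileformat=VCFv4.2",
                  "##INFO=<ID=DP,Number=1,Type=Integer>"]))
# [['fileformat', 'VCFv4.2'], ['INFO', 'ID=DP,Number=1,Type=Integer']]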
5,351,379
def test_dependencies_detection_recursive_different_steps(dummy_nb_config): """Test dependencies are detected even with a chain of functions calls.""" pipeline = Pipeline(dummy_nb_config) _source = [''' x = 5 def foo(): print(x) '''] pipeline.add_step(Step(name="step1", source=_source)) _source = [''' def bar(): foo() '''] pipeline.add_step(Step(name="step2", source=_source)) _source = ["bar()"] pipeline.add_step(Step(name="step3", source=_source)) pipeline.add_edge("step1", "step2") pipeline.add_edge("step2", "step3") dependencies.dependencies_detection(pipeline) assert sorted(pipeline.get_step("step1").ins) == [] assert sorted(pipeline.get_step("step1").outs) == ['foo', 'x'] assert sorted(pipeline.get_step("step2").ins) == ['foo', 'x'] assert sorted(pipeline.get_step("step2").outs) == ['bar', 'foo', 'x'] assert sorted(pipeline.get_step("step3").ins) == ['bar', 'foo', 'x'] assert sorted(pipeline.get_step("step3").outs) == []
5,351,380
def generate_result_table(models, data_info): # per idx (gene/transcript) """ Generate a table containing learned model parameters and statistic tests. Parameters ---------- models Learned models for individual genomic positions of a gene. group_labels Labels of samples. data_inf Dict Returns ------- table List of tuples. """ ### condition_names,run_names = get_ordered_condition_run_names(data_info) # information from the config file used for modelling. ### ### table = [] for key, (model,prefiltering) in models.items(): idx, position, kmer = key mu = model.nodes['mu_tau'].expected() # K sigma2 = 1./model.nodes['mu_tau'].expected(var='gamma') # K var_mu = model.nodes['mu_tau'].variance(var='normal') # K # mu = model.nodes['y'].params['mean'] # sigma2 = model.nodes['y'].params['variance'] w = model.nodes['w'].expected() # GK N = model.nodes['y'].params['N'].round() # GK N0 = N[:, 0].squeeze() N1 = N[:, 1].squeeze() w0 = w[:, 0].squeeze() coverage = np.sum(model.nodes['y'].params['N'], axis=-1) # GK => G # n_reads per group p_overlap, list_cdf_at_intersections = stats.calc_prob_overlapping(mu, sigma2) model_group_names = model.nodes['x'].params['group_names'] #condition_names if pooling, run_names otherwise. ### Cluster assignment ### conf_mu = [calculate_confidence_cluster_assignment(mu[0],model.kmer_signal),calculate_confidence_cluster_assignment(mu[1],model.kmer_signal)] cluster_idx = {} if conf_mu[0] > conf_mu[1]: cluster_idx['unmod'] = 0 cluster_idx['mod'] = 1 else: cluster_idx['unmod'] = 1 cluster_idx['mod'] = 0 mu_assigned = [mu[cluster_idx['unmod']],mu[cluster_idx['mod']]] sigma2_assigned = [sigma2[cluster_idx['unmod']],sigma2[cluster_idx['mod']]] conf_mu = [conf_mu[cluster_idx['unmod']],conf_mu[cluster_idx['mod']]] w_mod = w[:,cluster_idx['mod']] mod_assignment = [['higher','lower'][(mu[0]<mu[1])^cluster_idx['mod']]] ### calculate stats_pairwise stats_pairwise = [] for cond1, cond2 in itertools.combinations(condition_names, 2): if model.method['pooling']: cond1, cond2 = [cond1], [cond2] else: cond1, cond2 = list(data_info[cond1].keys()), list(data_info[cond2].keys()) if any(r in model_group_names for r in cond1) and any(r in model_group_names for r in cond2): w_cond1 = w[np.isin(model_group_names, cond1), cluster_idx['mod']].flatten() w_cond2 = w[np.isin(model_group_names, cond2), cluster_idx['mod']].flatten() n_cond1 = coverage[np.isin(model_group_names, cond1)] n_cond2 = coverage[np.isin(model_group_names, cond2)] z_score, p_ws = stats.z_test(w_cond1, w_cond2, n_cond1, n_cond2) # two=tailed w_mod_mean_diff = np.mean(w_cond1)-np.mean(w_cond2) stats_pairwise += [w_mod_mean_diff, p_ws, z_score] else: stats_pairwise += [None, None, None] if len(condition_names) > 2: ### calculate stats_one_vs_all stats_one_vs_all = [] for cond in condition_names: if model.method['pooling']: cond = [cond] else: cond = list(data_info[cond].keys()) if any(r in model_group_names for r in cond): w_cond1 = w[np.isin(model_group_names, cond), cluster_idx['mod']].flatten() w_cond2 = w[~np.isin(model_group_names, cond), cluster_idx['mod']].flatten() n_cond1 = coverage[np.isin(model_group_names, cond)] n_cond2 = coverage[~np.isin(model_group_names, cond)] z_score, p_ws = stats.z_test(w_cond1, w_cond2, n_cond1, n_cond2) w_mod_mean_diff = np.mean(w_cond1)-np.mean(w_cond2) stats_one_vs_all += [w_mod_mean_diff, p_ws, z_score] else: stats_one_vs_all += [None, None, None] ### w_mod_ordered, coverage_ordered = [], [] # ordered by conditon_names or run_names based on headers. 
if model.method['pooling']: names = condition_names else: names = run_names for name in names: if name in model_group_names: w_mod_ordered += list(w_mod[np.isin(model_group_names, name)]) coverage_ordered += list(coverage[np.isin(model_group_names, name)]) else: w_mod_ordered += [None] coverage_ordered += [None] ### ### prepare values to write row = [idx, position, kmer] row += stats_pairwise if len(condition_names) > 2: row += stats_one_vs_all # row += [p_overlap] # row += list_cdf_at_intersections row += list(w_mod_ordered) row += list(coverage_ordered) row += mu_assigned + sigma2_assigned + conf_mu + mod_assignment if prefiltering is not None: row += [prefiltering[model.method['prefiltering']['method']]] ### Filtering those positions with a nearly single distribution. cdf_threshold = 0.1 x_x1, y_x1, x_x2, y_x2 = list_cdf_at_intersections is_not_inside = ((y_x1 < cdf_threshold) & (x_x1 < cdf_threshold)) | ((y_x2 < cdf_threshold) & (x_x2 < cdf_threshold)) | (( (1-y_x1) < cdf_threshold) & ((1-x_x1) < cdf_threshold)) | (( (1-y_x2) < cdf_threshold) & ((1-x_x2) < cdf_threshold)) if (p_overlap <= 0.5) and (is_not_inside): table += [tuple(row)] return table
5,351,381
def test_parsers_gelfparser_parse_partially_invalid_file(caplog): """Tests the GELFParser with a file containing invalid JSON strings.""" with StringIO() as file: file.writelines( [ # This is invalid gelf but we assume it's valid in our case '{"short_message": "This seems valid."}\n', # Invalid json "{ This is not valid json and raises json.decoder.JSONDecodeError\n", # Valid json but invalid gelf raises KeyError: # key "short_message" not found "{}\n", # As above but raises TypeError: # list indices must be integers or slices, not str "[]\n", # Another assumed valid gelf '{"short_message": {"username": "This seems valid too."}}\n', ] ) file.seek(0) parser = GELFParser() events = list(parser.parse(file)) assert len(events) == 2 assert events[0] == "This seems valid." assert events[1] == {"username": "This seems valid too."} caplog.clear() with StringIO() as file: file.write("{ This is not valid json and raises json.decoder.JSONDecodeError\n") file.seek(0) parser = GELFParser() with caplog.at_level(logging.DEBUG): events = list(parser.parse(file)) assert len(events) == 0 assert ( "ralph.parsers", logging.ERROR, "Input event '{ This is not valid json and raises " "json.decoder.JSONDecodeError\n' " "is not a valid JSON string! It will be ignored.", ) in caplog.record_tuples assert ( "ralph.parsers", logging.DEBUG, "Raised error was: Expecting property name enclosed in double quotes: " "line 1 column 3 (char 2)", ) in caplog.record_tuples caplog.clear() with StringIO() as file: file.write("{}") file.seek(0) parser = GELFParser() with caplog.at_level(logging.DEBUG): events = list(parser.parse(file)) assert len(events) == 0 assert ( "ralph.parsers", logging.ERROR, "Input event '{}' doesn't comply with GELF format! It will be ignored.", ) in caplog.record_tuples assert ( "ralph.parsers", logging.DEBUG, "Raised error was: 'short_message'", ) in caplog.record_tuples caplog.clear() with StringIO() as file: file.write("[]") file.seek(0) parser = GELFParser() with caplog.at_level(logging.DEBUG): events = list(parser.parse(file)) assert len(events) == 0 assert ( "ralph.parsers", logging.ERROR, "Input event '[]' is not a valid JSON string! It will be ignored.", ) in caplog.record_tuples assert ( "ralph.parsers", logging.DEBUG, "Raised error was: list indices must be integers or slices, not str", ) in caplog.record_tuples
5,351,382
def get_href_kind(href, domain): """Return kind of href (internal or external)""" if is_internal_href(href, domain): kind = 'internal' else: kind = 'external' return kind
5,351,383
def main(infile, make_plot, split=False): """Read the input file and average the profiles within it.""" print('Reading file "{}"'.format(infile)) raw_data, raw_matrix, average_data, var_data = average_profiles(infile) print('Data sets: {}'.format(len(raw_matrix))) print('Variables in sets:') for i in raw_data: print('- "{}"'.format(i)) if make_plot: print('Plotting all averaged profiles.') plot_all_items(average_data, var_data) for ykey in raw_data: if ykey in ('coord1', 'chunk'): continue plot_all_sets(raw_data, ykey) xkey = 'coord1' plot_xy_data( average_data[xkey], average_data[ykey], yerror=np.sqrt(var_data[ykey]), xlabel=xkey, ylabel=ykey, ) write_output( 'averaged-{}.txt'.format(pathlib.Path(infile).stem), [i for i in average_data], average_data ) write_output_error( 'averaged-error-{}.txt'.format(pathlib.Path(infile).stem), [i for i in average_data], average_data, var_data, ) if split: for i in average_data: write_output_error( 'averaged-{}-{}.txt'.format( i, pathlib.Path(infile).stem ), [i], average_data, var_data, )
5,351,384
def get_input_file(req_id, file_number):
    """
    Returns an uploaded input file, 404 if not yet uploaded.
    :param req_id: The id of the conversion.
    :param file_number: File number.
    :return: File as text.
    """
    cr = db.retrieve(req_id)
    if cr is None:
        return jsonify({
            'status': 'errored',
            'message': 'no job found matching request id: ' + req_id
        }), 404
    if int(file_number) not in range(cr.file_count):
        return jsonify({
            'status': 'errored',
            'message': 'file number must be in (0, ' + str(cr.file_count - 1) + ').'
        }), 404
    if len(glob(config['FILE_STORE'] + str(req_id) + '/input/' + str(file_number) + '.*')) == 0:
        return jsonify({
            'status': 'errored',
            'message': 'file number ' + str(file_number) + ' has not been uploaded yet.'
        }), 404

    input_file = open(glob(config['FILE_STORE'] + str(req_id) + '/input/' + str(file_number) + '.*')[0], 'r')

    def generate(file):
        while True:
            l = file.readline()
            if l:
                yield l
            else:
                return

    return Response(generate(input_file), content_type='application/' + os.path.splitext(
        input_file.name)[1].strip('.')), 200
5,351,385
def check_mark(value): """Helper method to create an html formatted entry for the flags in tables.""" return format_html('&check;') if value == 1 else ''
5,351,386
def test_sleep(n): """Used only for testing -- example method with argument. """ logger = LMLogger.get_logger() logger.info("Starting test_sleep({}) in pid {}".format(n, os.getpid())) try: job = get_current_job() job.meta['sample'] = 'test_sleep metadata' job.meta['pid'] = int(os.getpid()) job.save_meta() time.sleep(n) logger.info("Completed test_sleep in pid {}".format(os.getpid())) return 0 except Exception as e: logger.error("Error on test_sleep in pid {}: {}".format(os.getpid(), e)) raise
5,351,387
def military_to_english_time(time, fmt="{0}:{1:02d}{2}"):
    """ assumes 08:33:55 and 22:33:42 type times
        will return 8:33am and 10:33pm
        (note: seconds are dropped)
    """
    ret_val = time
    try:
        h, m = split_time(time)
        ampm = "am"
        if h >= 12:
            ampm = "pm"
        if h >= 24:
            ampm = "am"

        h = h % 12
        if h == 0:
            h = 12

        ret_val = fmt.format(h, m, ampm)
    except:
        # on any parse failure, fall back to returning the input unchanged
        pass
    return ret_val
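# A quick usage sketch, assuming the module's split_time helper parses the
# hour and minute out of an "HH:MM:SS" string as integers.
print(military_to_english_time("08:33:55"))  # 8:33am
print(military_to_english_time("22:33:42"))  # 10:33pm
print(military_to_english_time("00:05:00"))  # 12:05am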
5,351,388
def test_inheritance_branch_override(root, branch, node_class): """Branches defined on the subclass take precedence over the baseclass. This precedence matches the MRO used for diamond inheritance. """ assert isinstance(root[branch]['local'], node_class)
5,351,389
def parse_ssh_config(text):
    """
    Parse an ssh-config output into a Python dict.

    Because Windows doesn't have grep, lol.
    """
    try:
        lines = text.split('\n')
        lists = [l.split(' ') for l in lines]
        # materialize the filter object so it can be indexed under Python 3
        lists = [list(filter(None, l)) for l in lists]

        tuples = [(l[0], ''.join(l[1:]).strip().strip('\r')) for l in lists]

        return dict(tuples)

    except IndexError:
        raise Exception("Malformed input")
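# A quick usage sketch on a made-up `ssh-config`-style block, assuming
# parse_ssh_config is defined as above.
print(parse_ssh_config("HostName 127.0.0.1\nUser vagrant\nPort 2222"))
# {'HostName': '127.0.0.1', 'User': 'vagrant', 'Port': '2222'}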
5,351,390
def visit_downloadlink_node_rst(self, node): """ Converts node *downloadlink* into :epkg:`rst`. """ logger = logging.getLogger("downloadlink") logger.info("[downloadlink] RST '{0}'".format(str(node))) if node['format']: self.add_text(":downloadlink:`{0} <{1}::{2}>`".format( node["anchor"], node["format"], node["filename"])) else: self.add_text(":downloadlink:`{0} <{0}::{1}>`".format( node["anchor"], node["filename"])) raise nodes.SkipNode
5,351,391
def snake_case(string: str) -> str: """Convert upper camelcase to snake case.""" return re.sub(r"(?<!^)(?=[A-Z])", "_", string).lower()
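# A quick usage sketch; note that runs of consecutive capitals are split
# letter by letter by this regex.
print(snake_case("CamelCaseString"))  # camel_case_string
print(snake_case("HTTPResponse"))     # h_t_t_p_response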
5,351,392
def positive_int(s: str) -> int:
    """Integer validator for `argparse.ArgumentParser`; rejects negative values."""
    i = int(s)
    if i < 0:
        raise argparse.ArgumentTypeError("A positive number is required")
    return i
5,351,393
def create_tempdir(suffix='', prefix='tmp', directory=None, delete=True): """Create a tempdir and return the path. This function registers the new temporary directory for deletion with the atexit module. """ tempd = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory) if delete: atexit.register(_cleanup_tempdir, tempd) return tempd
5,351,394
def code(
    type_: ObjectTypes = typer.Option(
        ...,
        '--type',
        help='Generate filter, model, and object files for the provided type.'
    ),
):
    """Generate filter, model, and object files for the provided type."""
    type_ = utils.snake_string(type_.value)
    gen_filter(type_)
    gen_model(type_)
    gen_object(type_)
5,351,395
async def get_transactor_key(request): """Get transactor key out of request.""" id_dict = deserialize_api_key( request.app.config.SECRET_KEY, extract_request_token(request) ) next_id = id_dict.get("id") auth_data = await get_auth_by_next_id(next_id) encrypted_private_key = auth_data.get("encrypted_private_key") private_key = decrypt_private_key( request.app.config.AES_KEY, next_id, encrypted_private_key ) hex_private_key = binascii.hexlify(private_key) return Key(hex_private_key), next_id
5,351,396
def _get_plot_aeff_exact_to_ground_energy(parsed_ncsd_out_files): """Returns a list of plots in the form (xdata, ydata, const_list, const_dict), where A=Aeff is xdata, and ground energy is ydata """ a_aeff_to_ground_state_energy = get_a_aeff_to_ground_state_energy_map( parsed_ncsd_out_files=parsed_ncsd_out_files) a_to_ground_state_energy = dict() for a_aeff, e in a_aeff_to_ground_state_energy.items(): if a_aeff[0] == a_aeff[1]: a_to_ground_state_energy[a_aeff[0]] = e return map_to_arrays(a_to_ground_state_energy) + (list(), dict())
5,351,397
def plot(config): """Plot train and test accuracy.""" optimizer_cls = config["optimizer_cls"] problem_cls = config["problem_cls"] num_epochs = config["num_epochs"] summary = load_summary(problem_cls, optimizer_cls) epochs = list(range(num_epochs + 1)) train_acc_percent = [100 * val for val in summary["train_accuracies"]] test_acc_percent = [100 * val for val in summary["test_accuracies"]] checkpoint = (num_epochs - 1, 0) savepath = get_plot_savepath(checkpoint, problem_cls, optimizer_cls, SUBDIR) plt.figure() plt.xlabel("Epoch") plt.ylabel("Accuracy") plt.plot(epochs, train_acc_percent, linestyle="-", label="Train") plt.plot(epochs, test_acc_percent, linestyle="--", label="Test") plt.legend() TikzExport().save_fig(savepath, tex_preview=False) plt.close("all")
5,351,398
def get_twitter_auth():
    """Set up Twitter credentials.

    return: configured OAuthHandler object"""
    parameters = set_parameters.take_auth_data()
    twitter_access_token = parameters['twitter_access_token']
    twitter_secret_token = parameters['twitter_secret_token']
    twitter_api_key = parameters['twitter_api_key']
    twitter_secret_key = parameters['twitter_secret_key']

    auth = OAuthHandler(twitter_api_key, twitter_secret_key)
    auth.set_access_token(twitter_access_token, twitter_secret_token)
    return auth
5,351,399