content: string (length 22 to 815k)
id: int64 (0 to 4.91M)
def pointCoordsDP2LP(dpX, dpY, dptZero, lPix = 1.0): """Convert device coordinates into logical coordinates dpX - x device coordinate dpY - y device coordinate dptZero - device coordinates of logical 0,0 point lPix - zoom value, number of logical points inside one device point (aka pixel) return point in logical coordinates """ return point.fromXY(xDP2LP(dpX, dptZero, lPix), yDP2LP(dpY, dptZero, lPix))
5,349,200
def geocode_input(api_key, input, geolocator): """ Use parallel processing to process inputted addresses as geocode Parameters: api_key (string): Google API key input (string): user inputted addresses geolocator: object from Google Maps API that generate geocode of address Returns: string[]: List of incorrect addresses string[]: formatted addresses of the inputted addresses float[]: coordinates of each address string: original inputted addresses """ #lessThanOneInt = True #time.sleep(1) #print(input) faultyAddress = None coords = None address = None #print('1') # for every line of input, generate location object placeid = database.fetch_placeid(input) if len(placeid) == 0: try: location = geolocator.geocode(input + " NC") # IMPORTANT: NC must be changed for usage in different states. coords = (location[0]['geometry']['location']['lat'], location[0]['geometry']['location']['lng']) address = location[0]["formatted_address"] database.insert_data(input, location[0]['place_id'], coords[0], coords[1], address) except: faultyAddress = str(input) #print(faultyAddress) else: out_data = database.fetch_output_data(placeid[0][0]) address = out_data[0][2] coords = [float(out_data[0][0]), float(out_data[0][1])] # output data return (faultyAddress, address, coords, input)
5,349,201
def satContact(sat_R, gs_R): """ Determines if satellite is within sight of a Ground Station Parameters ---------- sat_R : numpy matrix [3, 1] - Input radius vector in Inertial System ([[X], [Y], [Z]]) gs_R : numpy matrix [3, 1] - Input radius vector in Inertial System ([[X], [Y], [Z]]) Returns ------- InContact : int - 1 or 0 if sat is in sight or out of sight, respectively See Also -------- Sun_Contact_Times : Determine if an orbit vector is illuminated Geo_Contact_Times : Determine if an orbit vector is within a geometric boundary References ---------- [1] D. Vallado, `Fundamentals of Astrodynamics and Applications`. 4th ed., Microcosm Press, 2013. - Modified Alg. 35, pg. 308 """ # Simplifying equations mag_sat = np.linalg.norm(sat_R) mag_gs = np.linalg.norm(gs_R) dot_ss = np.dot(np.transpose(sat_R), gs_R) # Find minimum parametric value Tmin = (((mag_sat ** 2) - dot_ss) / ((mag_sat ** 2) + (mag_gs ** 2) - 2 * (dot_ss))) if Tmin < 0 or Tmin > 1: InContact = 1 # Satellite can see GS if Tmin > 0 and Tmin < 1: cTmin = (((1 - Tmin) * (mag_sat ** 2) + (dot_ss * Tmin)) / (6378.137 ** 2)) if cTmin > 1: InContact = 1 # Satellite can see GS if cTmin < 1: InContact = 0 # Satellite can't see GS return InContact
5,349,202
def save_metadata(hparams): """Saves FLAGS and hparams to output_dir.""" output_dir = os.path.expanduser(FLAGS.output_dir) if not tf.gfile.Exists(output_dir): tf.gfile.MakeDirs(output_dir) # Save FLAGS in txt file if hasattr(FLAGS, "flags_into_string"): flags_str = FLAGS.flags_into_string() t2t_flags_str = "\n".join([ "--%s=%s" % (f.name, f.value) for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] ]) else: flags_dict = FLAGS.__dict__["__flags"] flags_str = "\n".join( ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) t2t_flags_str = None flags_txt = os.path.join(output_dir, "flags.txt") with tf.gfile.Open(flags_txt, "w") as f: f.write(flags_str) if t2t_flags_str: t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") with tf.gfile.Open(t2t_flags_txt, "w") as f: f.write(t2t_flags_str) # Save hparams as hparams.json hparams_fname = os.path.join(output_dir, "hparams.json") with tf.gfile.Open(hparams_fname, "w") as f: f.write(hparams.to_json(indent=0, sort_keys=True))
5,349,203
def processFolder(abfFolder): """call processAbf() for every ABF in a folder.""" if not type(abfFolder) is str or not len(abfFolder)>3: return files=sorted(glob.glob(abfFolder+"/*.abf")) for i,fname in enumerate(files): print("\n\n\n### PROCESSING {} of {}:".format(i,len(files)),os.path.basename(fname)) processAbf(fname,show=False) plt.show() return
5,349,204
def load_tlm_output_npz(pytorch_tlm: nn.Module, npz: str, embeddings_keys: List[str] = None, name: str = "TLM"): """Restore a TLM-like model (possibly a `nn.Module`) for fine-tuning We just populate the `TransformerEncoderStack` and the embeddings from weights, all other values remain uninitialized :param pytorch_tlm: A TLM-like model :param npz: A file to restore the weights from :param embeddings_keys: Names of embeddings to restore, defaults to `None` in which case we restore all embeddings :param name: A name for this primitive :return: """ d = np.load(npz) from_tlm_array(pytorch_tlm, d, embeddings_keys, name) if hasattr(pytorch_tlm, 'output_layer'): from_weight_array(pytorch_tlm.output_layer, d, f"{name}/output") else: from_weight_array(pytorch_tlm.output, d, f"{name}/output")
5,349,205
def insertOrUpdateTweetBatch( profileRecs, tweetsPerProfile=200, verbose=False, writeToDB=True, campaignRec=None, onlyUpdateEngagements=True, ): """ Get Twitter tweet data from the Twitter API for a batch of profiles and store their tweets in the database. The verbose and writeToDB flags can be used together to print tweet data which would be inserted into the database without actually inserting it. This can be used to preview tweet data without increasing storage or using time to do inserts and updates. :param profileRecs: list of Profile objects, to create or update tweets for. This might be a list from the Profile table which has been filtered based on a job schedule, or Profiles which match criteria such as high follower count. :param tweetsPerProfile: Default 200. Count of tweets to get for each profile, as an integer. If this is 200 or less, then page limit is left at 1 and the items per page count is reduced. If this is more than 200, then the items per page count is left at 200 and page limit is adjusted to get a number of tweets as the next multiple of 200. e.g. 550 tweets needs 2 pages to get the first 400 tweets, plus a 3rd page to get the additional 150 tweets. We simplify to get 200*3 = 600 tweets, to keep the count consistent on each query. Note that even if 200 tweets are requested, the API sometimes returns only 199 and the user may have posted fewer than the requested tweets. The limit for a single request to the API is 200, therefore any number up to 200 has the same rate limit cost. It may be useful to set a number here as 200 or less if we want to get through all the users quickly, as this takes fewer API queries and fewer db inserts or updates. Also, consider that a very low number may lead to deadtime, where the script takes a fixed time to get 200 or 1 tweets and now that it has processed the 1 requested and the window limit is hit, it has no Tweet processing to do while waiting for the next rate limited window. Though a low value will mean less storage space is required. :param verbose: Default False. If True, print the data used to create a local Tweet record. This data can be printed regardless of whether the data is written to the db record or not. :param writeToDB: Default True. If True, write the fetched tweets to local database, otherwise print and discard them. This is useful when used in combination with verbose flag which prints the data. :param campaignRec: Campaign record to assign to the local Tweet records. Default None to not assign any Campaign. :param onlyUpdateEngagements: Default True to only update the favorite and retweet count of the tweet in the local db. If False, update other fields too. Those are expected to be static on the Twitter API, but if rules change on this repo then it is useful to apply them historically on existing Tweet records. This flag only affects existing records. :return: None """ APIConn = authentication.getAPIConnection() if tweetsPerProfile <= 200: tweetsPerPage = tweetsPerProfile pageLimit = 1 else: tweetsPerPage = 200 # Round up to get the last page which might have fewer items pageLimit = math.ceil(tweetsPerProfile / tweetsPerPage) for p in profileRecs: try: fetchedTweets = _getTweets( APIConn, userID=p.guid, tweetsPerPage=tweetsPerPage, pageLimit=pageLimit ) except TweepError as e: print( "Could not fetch tweets for user: @{screenName}." " {type}. {msg}".format( screenName=p.screenName, type=type(e).__name__, msg=str(e) ) ) else: print("User: {0}".format(p.screenName)) if writeToDB: print("Inserting/updating tweets in db...") else: print("Displaying tweets but not inserting/updating...") added = errors = 0 for f in fetchedTweets: try: data, tweetRec = insertOrUpdateTweet( tweet=f, profileID=p.id, writeToDB=writeToDB, onlyUpdateEngagements=onlyUpdateEngagements, ) if tweetRec and campaignRec: try: campaignRec.addTweet(tweetRec) except DuplicateEntryError: # Ignore error if Tweet was already assigned. pass if verbose: if tweetRec: tweetRec.prettyPrint() else: # No record was created, so use data dict. m = data["message"] created = data["createdAt"] data["message"] = lib.text_handling.flattenText(m) data["createdAt"] = str(lib.set_tz(created)) # TODO: Check if this will raise an error # on unicode symbols in message. print(json.dumps(data, indent=4)) added += 1 except Exception as e: print( "Could not insert/update tweet `{id}` for user" " @{screenName}. {type}. {msg}".format( id=f.id, screenName=p.screenName, type=type(e).__name__, msg=str(e), ) ) errors += 1 total = added + errors # Print stats on every 10 processed and on the last item. if total % 10 == 0 or f == fetchedTweets[-1]: print( "Total: {total:2,d}. Added: {added:2,d}. " "Errors: {errors:2,d}.".format( total=total, added=added, errors=errors ) )
5,349,206
def determine_visible_field_names(hard_coded_keys, filter_string, ref_genome): """Determine which fields to show, combining hard-coded keys and the keys in the filter string. """ fields_from_filter_string = extract_filter_keys(filter_string, ref_genome) return list(set(hard_coded_keys) | set(fields_from_filter_string))
5,349,207
def get_data_day(data: pd.DataFrame): """Get weekday/weekend designation value from data. :param pandas.DataFrame data: the data to get day of week from. :return: (*numpy.array*) -- indicates weekend or weekday for every day. """ return np.array(data["If Weekend"])
5,349,208
def bisection(f, a, b, power, iter_guess="yes"): """Given f(x) in [`a`,`b`] find x within tolerance, `tol`. Root-finding method: f(x) = 0. Parameters ---------- f : expression Input function. a : float Left-hand bound of interval. b : float Right-hand bound of interval. power : float Signed, specified power of tolerance until satisfying method. iter_guess : string or integer Optional argument that is string by default. If integer, iterate for that integer. Returns ------- P : list Aggregate collection of evaluated points, `p`. ERROR : list Propogation of `error` through method. I : list Running collection of iterations through method. Raises ------ bad_iter : string If input for desired iterations was assigned not an integer. opposite_signs : string If initial guesses did not evaluate to have opposite signs. must_be_expression : string If input `f` was of array, list, tuple, etcetera... Warns ----- solution_found : string Inform user that solution was indeed found. solution_not_found : string If initial guess or tolerance were badly defined. Notes ----- Relying on the Intermediate Value Theorem, this is a bracketed, root-finding method. Generates a sequence {p_n}^{inf}_{n=1} to approximate a zero of f(x), `p` and converges by O(1 / (2**N)). Examples -------- If f(x) = x**3 + 4*x**2 = 10 => f(x) = x**3 + 4*x**2 - 10 = 0 """ a, b, tol = float(a), float(b), float(10**power) # calculate if expression if isinstance(f,(FunctionType, sp.Expr)): # check if f(a) and f(b) are opposite signs if f(a)*f(b) < 0: P, ERROR, I = [], [], [] # initialize lists if iter_guess == "yes": # if left unassigned, guess N = max_iterations(a, b, power, 'bisection') elif isinstance(iter_guess, int): # if defined as integer, use N = iter_guess # else, break for bad assignment else: sys.exit("ERROR! " + bad_iter) i, error = 0, tol*10 # initialize # exit by whichever condition is TRUE first while error >= tol and i <= N: x = (b - a)/2 p = a + x # new value, p P.append(p) if f(a)*f(p) > 0: a = p # adjust next bounds else: b = p error = abs(x) # error of new value, p ERROR.append(error); I.append(i) i += 1 # iterate to i + 1 if i < N: print('Congratulations! ', solution_found) else: print('Warning! ', solution_not_found) # abort if f(a) is not opposite f(b) else: sys.exit('ERROR! ' + opposite_signs) # abort if not expression else: sys.exit('ERROR! ' + must_be_expression) return P, ERROR, I
5,349,209
def make_pretty(image, white_level=50): """Rescale and clip an astronomical image to make features more obvious. This rescaling massively improves the sensitivity of alignment by removing background and decreases the impact of hot pixels and cosmic rays by introducing a white clipping level that should be set so that most of a star's psf is clipped. Arguments: white_level -- the clipping level as a multiple of the median-subtracted image's mean. For most images, 50 is good enough. """ pretty = (image - np.median(image)).clip(0) pretty /= np.mean(pretty) pretty = pretty.clip(0, white_level) return pretty
5,349,210
def inventory( network_report_file: Path = typer.Argument(...), output_file: Optional[Path] = typer.Option( None, "-o", "--output", help="Output file", ), print_to_console: bool = typer.Option( False, "--print", help="Print to console instead of output file" ), remote_user: str = typer.Option("root", "--remote-user", help="Remote user"), ) -> None: """Generate full inventory report based on network report""" if not output_file and not print_to_console: today = current_formatted_date() output_file = Path(f"/tmp/{today}_dice_admin_inventory_report.csv") network_report = read_list_data_from_csv(network_report_file) active_hosts = [host for host in network_report if host["status"] == "UP"] inventory_report = list(_inventory_from_network_report(active_hosts, remote_user)) if not print_to_console: write_list_data_as_dict_to_csv( inventory_report, INVENTORY_COLUMNS, cast(Path, output_file) ) admin_logger.info(f"Report saved to {output_file}")
5,349,211
def get_examples_version(idaes_version: str): """Given the specified 'idaes-pse' repository release version, identify the matching 'examples-pse' repository release version. Args: idaes_version: IDAES version, e.g. "1.5.0" or "1.5.0.dev0+e1bbb[...]" Returns: Examples version, or if there is no match, return None. """ # Fetch the idaes:examples version mapping from Github compat_file = 'idaes-compatibility.json' url = f"{GITHUB_API}/repos/{REPO_ORG}/{REPO_NAME}/contents/{compat_file}" headers = {'Accept': 'application/vnd.github.v3.raw'} _log.debug(f'About to call requests.get({url}, {headers})') res = requests.get(url, headers=headers) if not res.ok: _log.debug(f'Problem getting mapping file: {res.json()}') raise DownloadError(res.json()) try: compat_mapping = json.loads(res.text)['mapping'] except KeyError: # return the latest version instead _log.warning('Ill-formed compatibility mapping file for examples repository:') _log.debug(f'compat_mapping: {res.text}') _log.info('Defaulting to latest released version of examples.') return None idaes_version_num = idaes_version version_numbers = idaes_version.split('.') if len(version_numbers) > 3: idaes_version_num = '.'.join(version_numbers[:3]) click.echo(f"Warning: non-release version of IDAES detected. " f"Using IDAES {idaes_version_num} as reference; " f"examples version compatibility is not guaranteed.") try: examples_version = compat_mapping[idaes_version_num] except KeyError: # return the latest version instead, as above _log.warning('IDAES version not found in compatibility mapping file. \ Defaulting to latest released version of examples.') return None _log.debug(f'get_examples_version({idaes_version}: {examples_version}') return examples_version
5,349,212
def data_convert(): """ data iteration wrapper """ if os.path.isdir(fea_path) == False: os.mkdir(fea_path) for dataname in sorted(os.listdir(label_path)): data_y, data_box = load_data(dataname) if data_y is None or data_box is None: continue # Skip the last one since we don't use it in yenchen's code for i in xrange(data_y.shape[0]/50): idx = i*50 print idx, data_y.shape[0], data_box.shape[0] data_one_hot = nearest_box(data_y[idx:idx+50], data_box[idx:idx+50]) npy_save(fea_path + dataname + '/' + out_prefix + str(i+1).zfill(4), data_one_hot)
5,349,213
def hash(data: bytes) -> bytes: """ Compute the hash of the input data using the default algorithm Args: data(bytes): the data to hash Returns: the hash of the input data """ return _blake2b_digest(data)
5,349,214
def compute_cd_small_batch(gt, output,batch_size=50): """ compute cd in case n_pcd is large """ n_pcd = gt.shape[0] dist = [] for i in range(0, n_pcd, batch_size): last_idx = min(i+batch_size,n_pcd) dist1, dist2 , _, _ = distChamfer(gt[i:last_idx], output[i:last_idx]) cd_loss = dist1.mean(1) + dist2.mean(1) dist.append(cd_loss) dist_tensor = torch.cat(dist) cd_ls = (dist_tensor*10000).cpu().numpy().tolist() return cd_ls
5,349,215
def find_sub_supra(axon, stimulus, eqdiff, sub_value=0, sup_value=0.1e-3): """ 'find_sub_supra' computes boundary values for the bisection method (used to identify the threeshold) Parameters ---------- axon (AxonModel): axon model stimulus (StimulusModel): stimulus model eqdiff (function): function that defines the ODE system sub_value (float): initial guess of sub-threshold value (default is 0) sup_value (float): initial guess of supra-threshold value (default is 0.1e-3) Returns ------- sub_value (float): sub-threshold value sup_value (float): supra-threshold value """ # Identification of bound values flag = 1 print('\n------------------------------------------------------') print('Identifying sub and supra threshold values...') print('------------------------------------------------------') ts = timer() while flag: # update stimulus stimulus.magnitude = -sup_value stimulus.update_stimulus(axon) # callback to save solution at each iteration of the integration def solout(t, y): time.append(t) sol.append(y.copy()) # initialize solution variable time = [] sol = [] # define integrator r = ode(eqdiff).set_integrator('dopri5') # set initial conditions r.set_initial_value(axon.icond, 0).set_f_params(axon.Ga, axon.Gm, axon.Cm, stimulus.voltage_ext, axon.d, axon.l, axon.Vr) # store solution at each iteration step r.set_solout(solout) # integrate r.integrate(stimulus.tend) # get complete solution x = np.array(sol) # get number of nodes with voltage > 80 mV N80 = (np.max(x[:, 0:axon.node_num], axis=0) > 80e-3).sum() if N80 > 3: flag = 0 else: sub_value = 1*sup_value sup_value = 2 * sup_value te = timer() print('...done. (sub, sup) = ({},{})'.format(sub_value, sup_value)) print('\n elapsed time: {:3f} ms'.format(te - ts)) return sub_value, sup_value
5,349,216
def test_ca_file(kivy_clock, scheme): """Passing a `ca_file` should not crash on http scheme, refs #6946""" from kivy.network.urlrequest import UrlRequest import certifi obj = UrlRequestQueue([]) queue = obj.queue req = UrlRequest( f"{scheme}://httpbin.org/get", on_success=obj._on_success, on_progress=obj._on_progress, on_error=obj._on_error, on_redirect=obj._on_redirect, ca_file=certifi.where(), debug=True ) wait_request_is_finished(kivy_clock, req) if req.error and req.error.errno == 11001: pytest.skip('Cannot connect to get address') ensure_called_from_thread(queue) check_queue_values(queue)
5,349,217
def constantly(x): """constantly: returns the function const(x)""" @wraps(const) def wrapper(*args, **kwargs): return x return wrapper
5,349,218
def ToBaseBand(xc, f_offset, fs): """ Parametros: xc: Señal a mandar a banda base f_offset: Frecuencia que esta corrido fs: Frecuencia de muestreo """ if PLOT: PlotSpectrum(xc, "xc", "xc_offset_spectrum.pdf", fs) # Se lo vuelve a banda base, multiplicando por una exponencial con fase f_offset / fs x_baseband = xc * np.exp((-1.0j * 2.0 * np.pi * f_offset/fs) * np.arange(len(xc))) if PLOT: PlotSpectrum(x_baseband, "x baseband", "x_baseband_spectrum.pdf", fs) return x_baseband
5,349,219
def hvp( f: DynamicJaxFunction, x: TracerOrArray, v: TracerOrArray, ) -> TracerOrArray: """Hessian-vector product function""" return jax.grad(lambda y: jnp.vdot(jax.grad(f)(y), v))(x)
5,349,220
def concat_all_gather(tensor): """ Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient. """ return hvd.allgather(tensor.contiguous())
5,349,221
def run_modes(): """Run modes configured in config file""" for mode in RUN_MODES: if mode == "check_index": logging.info("Checking that TMDB Elasticsearch index is created") if index_exists_and_has_data(INDEX_READ): logging.info("Elasticsearch index has been created and indexed with data") else: logging.error("Elasticsearch index isn't created or hasn't been indexed with data") raise ValueError("Inappropriate value for count of data in Elasticsearch index") if mode == "processing": if not(index_exists_and_has_data(PROCESSED_INDEX)) or FORCE_REWRITE: logging.info("Starting processing the index") P.main() if mode == "searching": logging.info("Starting to search in the index processed") S.main() if mode == "nlp": logging.info("Starting the NLP pipeline before indexing to ES") N.main()
5,349,222
async def test_config_entry_not_ready( hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_elgato: MagicMock, ) -> None: """Test the Elgato configuration entry not ready.""" mock_elgato.info.side_effect = ElgatoConnectionError mock_config_entry.add_to_hass(hass) await hass.config_entries.async_setup(mock_config_entry.entry_id) await hass.async_block_till_done() assert len(mock_elgato.info.mock_calls) == 1 assert mock_config_entry.state is ConfigEntryState.SETUP_RETRY
5,349,223
def start_search(search): """Run the specified number of threads of the searcher""" #Make the queue of all of the threads to run my_queue = Queue.Queue() for i in range(search.threads): t = threading.Thread(target=search_instance, args=(search, )) my_queue.put(t) #Run all of the threads while not my_queue.empty(): try: my_queue.get().start() except Exception as e: print "Error: %s" % (e)
5,349,224
def get_config(): """Prints the current config""" print(manager.get_config())
5,349,225
def find_files( path: str, skip_folders: tuple, skip_files: tuple, extensions: tuple = (".py",), ) -> List[str]: """Find recursively all files in path. Parameters ---------- path : str Path to a folder to find files in. skip_folders : tuple Skip folders containing folder to skip skip_files : tuple Skip files. extensions : tuple, optional Extensions to filter by. Default is (".py", ) Returns ------- list Sorted list of found files. """ found_files = [] for root, _dirs, files in os.walk(path, topdown=False): for filename in files: fpath = os.path.join(root, filename) if any(folder in fpath for folder in skip_folders): continue if fpath in skip_files: continue if filename.endswith(extensions): found_files.append(fpath) return list(sorted(found_files))
5,349,226
def get_min_area_rect(points): """ 【得到点集的最小面积外接矩形】 :param points: 轮廓点集,n*1*2的ndarray :return: 最小面积外接矩形的四个端点,4*1*2的ndarray """ rect = cv2.minAreaRect(points) # 最小面积外接矩形 box = cv2.boxPoints(rect) # 得到矩形的四个端点 box = np.int0(box) box = box[:, np.newaxis, :] # 从4*2转化为4*1*2 return box
5,349,227
def the_volume_is_republished(volume_ctx): """the volume is re-published.""" volume_ctx[VOLUME_CTX_KEY] = publish_volume()
5,349,228
def vector_to_cyclic_matrix(vec): """vec is the first column of the cyclic matrix""" n = len(vec) if vec.is_sparse(): matrix_dict = dict((((x+y)%n, y), True) for x in vec.dict() for y in xrange(n)) return matrix(GF(2), n, n, matrix_dict) vec_list = vec.list() matrix_lists = [vec_list[-i:] + vec_list[:-i] for i in xrange(n)] return matrix(GF(2), n, n, matrix_lists)
5,349,229
def cfn_resource_helper(): """ A helper method for the custom cloudformation resource """ # Custom logic goes here. This might include side effects or # producing a return value used elsewhere in your code. logger.info("cfn_resource_helper logic") return True
5,349,230
def get_ts_code_and_list_date(engine): """查询ts_code""" return pd.read_sql('select ts_code,list_date from stock_basic', engine)
5,349,231
def balanced_eq(want, to_balance): """Run `to_balance` through the expander to get its tags balanced, and assert the result is `want`.""" expander = ForParser(to_balance) eq_(want, expander.to_unicode())
5,349,232
def modifySummary(filename, cost, leadTime, isManufacturable): """ Modifies an existing testbench_manifest file, changing the AnalysisStatus and Value fields. """ print filename with open(filename, "r") as file: summary = json.load(file) summary["TierLevel"] = 2 summary["TestBench"] = "Foundry" if cost and leadTime: summary["AnalysisStatus"] = "OK" for metric in summary["Metrics"]: if metric["Name"] == "Vehicle_Unit_Cost": metric["Value"] = str(cost) metric["Unit"] = "dollars" elif metric["Name"] == "Manufacturing_Lead_Time": metric["Value"] = str(leadTime) metric["Unit"] = "days" elif metric["Name"] == "Manufacturable": metric["Value"] = str(isManufacturable) metric["Unit"] = "" else: summary["AnalysisStatus"] = "FAILED" for metric in summary["Metrics"]: if metric["Name"] == "Vehicle_Unit_Cost": metric["Value"] = str(0.0) metric["Unit"] = "dollars" elif metric["Name"] == "Manufacturing_Lead_Time": metric["Value"] = str(0.0) metric["Unit"] = "days" elif metric["Name"] == "Manufacturable": metric["Value"] = "false" metric["Unit"] = "" with open(filename, "w") as file: json.dump(summary, file, indent=4)
5,349,233
def nested_cv_ridge( X, y, test_index, n_bins=4, n_folds=3, alphas = 10**np.linspace(-20, 20, 81), npcs=[10, 20, 40, 80, 160, 320, None], train_index=None, ): """ Predict the scores of the testing subjects based on data from the training subjects using ridge regression. Hyperparameters are chosen based on a nested cross-validation. The inner-loop of the nested cross-validation is a stratified k-fold cross-validation. Parameters ---------- X : ndarray of shape (n_samples, n_features) y : ndarray of shape (n_samples, ) test_idx : ndarray of shape (n_test_samples, ) Indices for the samples that are used for testing. n_bins : int Training data are divided into `n_bins` bins for stratified k-fold cross-validation. n_folds : int Number of folds for stratified k-fold cross-validation. alphas : {list, ndarray of shape (n_alphas, )} Choices of the regularization parameter for ridge regression. npcs : list Choices of the number of PCs used in the prediction model in increasing order. Each element in the list should be an integer or `None`. `None` means all PCs are used. train_idx : {None, ndarray of shape (n_training_samples, )} Indices for the samples that are used for training. If it is `None`, then all the samples except for the test samples are used. Returns ------- yhat : ndarray of shape (n_test_samples, ) Predicted scores for the test samples. alpha : float The chosen element of `alphas` based on nested cross-validation. npc : {int, None} The chosen element of `npcs` based on nested cross-validation. cost : float The cost based on the chosen hyperparameters, which is the minimum cost for training data among all hyperparameter choices. """ if train_index is None: train_index = np.setdiff1d(np.arange(X.shape[0], dtype=int), test_index) X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] bin_limits = np.histogram(y_train, n_bins)[1] bins = np.digitize(y_train, bin_limits[:-1]) cv = StratifiedKFold(n_splits=n_folds) costs = [] for train, test in cv.split(X_train, bins): yhat = grid_ridge(X_train[train], X_train[test], y_train[train], alphas, npcs) cost = ((y_train[test][:, np.newaxis, np.newaxis] - yhat)**2).sum(axis=0) costs.append(cost) costs = np.sum(costs, axis=0) a, b = np.unravel_index(costs.argmin(), costs.shape) alpha = alphas[a] npc = npcs[b] yhat = ridge(X_train, X_test, y_train, alpha, npc) return yhat, alpha, npc, costs[a, b]
5,349,234
def absolute_time(time_delta, meta): """Convert a MET into human readable date and time. Parameters ---------- time_delta : `~astropy.time.TimeDelta` time in seconds after the MET reference meta : dict dictionary with the keywords ``MJDREFI`` and ``MJDREFF`` Returns ------- time : `~astropy.time.Time` absolute time with ``format='ISOT'`` and ``scale='UTC'`` """ time = time_ref_from_dict(meta) + time_delta return Time(time.utc.isot)
5,349,235
def create_app(object_name, env="prod"): """ Arguments: object_name: the python path of the config object, e.g. webapp.settings.ProdConfig env: The name of the current environment, e.g. prod or dev """ app = Flask(__name__) app.config.from_object(object_name) app.config['ENV'] = env # init the cache cache.init_app(app) # init SQLAlchemy db.init_app(app) login_manager.init_app(app) # register our blueprints from controllers.main import main from controllers.user import user app.register_blueprint(main) app.register_blueprint(user) return app
5,349,236
def test_parameter_name(): """ test_parameter_name """ ta = Tensor(np.ones([2, 3])) tb = Tensor(np.ones([1, 4])) n = Net(ta, tb) names = [] for m in n.parameters_and_names(): if m[0]: names.append(m[0]) assert names[0] == "mod1.weight" assert names[1] == "mod2.weight" assert names[2] == "mod3.mod1.weight" assert names[3] == "mod3.mod2.weight"
5,349,237
def yyyydoy_to_date(yyyydoy): """ Convert a string in the form of either 'yyyydoy' or 'yyyy.doy' to a datetime.date object, where yyyy is the 4 character year number and doy is the 3 character day of year :param yyyydoy: string with date in the form 'yyyy.doy' or 'yyyydoy' :return: datetime.date object :rtype: datetime.date """ try: if '.' in yyyydoy: if len(yyyydoy) != 8: raise ValueError('Invalid string: must be yyyydoy or yyyy.doy') yyyy, doy = yyyydoy.split('.') else: if len(yyyydoy) != 7: raise ValueError('Invalid string: must be yyyydoy or yyyy.doy') yyyy = yyyydoy[0:4] doy = yyyydoy[4:7] return datetime.date(int(yyyy), 1, 1) + datetime.timedelta(int(doy) - 1) except ValueError: raise ValueError('Invalid string: must be yyyydoy or yyyy.doy')
5,349,238
def create_selection(): """ Create a selection expression """ operation = Forward() nested = Group(Suppress("(") + operation + Suppress(")")).setResultsName("nested") select_expr = Forward() functions = select_functions(select_expr) maybe_nested = functions | nested | Group(var_val) operation <<= maybe_nested + OneOrMore(oneOf("+ - * /") + maybe_nested) select_expr <<= operation | maybe_nested alias = Group(Suppress(upkey("as")) + var).setResultsName("alias") full_select = Group( Group(select_expr).setResultsName("selection") + Optional(alias) ) return Group( Keyword("*") | upkey("count(*)") | delimitedList(full_select) ).setResultsName("attrs")
5,349,239
def move_nodes(source_scene, dest_scene): """ Moves scene nodes from the source scene to the destination scene. :type source_scene: fbx.FbxScene :type dest_scene: fbx.FbxScene """ source_scene_root = source_scene.GetRootNode() # type: fbx.FbxNode dest_scene_root = dest_scene.GetRootNode() # type: fbx.FbxNode for node in get_children(source_scene_root): dest_scene_root.AddChild(node) # Although the original nodes are attached to the destination Scene root node, they are still connected to the old one and # so the connections must to be removed. Since there could be lots of children, its better to disconnect the root node from the children. source_scene_root.DisconnectAllSrcObject() # Because the Scene Object also has connections to other types of FBX objects, they need to be moved too. # (I'm guessing) Also since there could be only a single mesh in the FBX, the scene has connections to that too. for index in range(source_scene.GetSrcObjectCount()): fbx_obj = source_scene.GetSrcObject(index) # type: fbx.FbxObject # Don't want to move the root node, the global settings or the Animation Evaluator (at this point) # The equality check is split as the root node is an instance of fbx.FbxNode type but other objects such as fbx.FbxGlobalSettings # are subclasses of the fbx.FbxNode type but NOT instances. A little weird but this works! # The == equality check could be used as fallback for isinstance() if necessary if isinstance(fbx_obj, type(source_scene_root)): continue elif issubclass(type(fbx_obj), (fbx.FbxGlobalSettings, fbx.FbxAnimEvaluator, fbx.FbxAnimStack, fbx.FbxAnimLayer)): continue else: fbx_obj.ConnectDstObject(dest_scene) # Now the scene can be disconnected as everything has been moved! (DO NOT FORGET THIS STEP) return source_scene.DisconnectAllSrcObject()
5,349,240
def extract_values(*args): """ Wrapper around `extract_value`; iteratively applies that method to all items in a list. If only one item was passed in, then we return that one item's value; if multiple items were passed in, we return a list of the corresponding item values. """ processed = [extract_value(arg) for arg in args] if len(processed) == 1: return processed[0] return processed
5,349,241
def disconnect(receiver, signal=Any, sender=Any, weak=True): """Disconnect receiver from sender for signal. Disconnecting is not required. The use of disconnect is the same as for connect, only in reverse. Think of it as undoing a previous connection.""" if signal is None: raise DispatcherError('signal cannot be None') if weak: receiver = safeRef(receiver) senderkey = id(sender) try: receivers = connections[senderkey][signal] except KeyError: raise DispatcherError('No receivers for signal %r from sender %s' % (signal, sender)) try: receivers.remove(receiver) except ValueError: raise DispatcherError('No connection to receiver %s for signal %r from sender %s' % (receiver, signal, sender)) _cleanupConnections(senderkey, signal)
5,349,242
def predefined_split(dataset): """Uses ``dataset`` for validiation in :class:`.NeuralNet`. Examples -------- >>> valid_ds = skorch.dataset.Dataset(X, y) >>> net = NeuralNet(..., train_split=predefined_split(valid_ds)) Parameters ---------- dataset: torch Dataset Validiation dataset """ return partial(_make_split, valid_ds=dataset)
5,349,243
def file_instruction(): """Prints log file setup instructions to the console """ print(''' Weather Log File ---------------- The weather log files are automatically created and saved in the weather_logs folder located in the same folder as this python file. The default filename given to log files is the date and time at the start of data acquisition in the form yyyy-mm-dd_hh-mm-ss.csv. To add a prefix to the filename, enter the prefix below and press return. To keep the default filename press return. ''') return
5,349,244
def writeBremDecay( # Might want a config later lhe, mAp, eps, zlims, seed, outdir, outname, nevents=10_000 ): """ Break A'->ee LHEs into brem and decay files and reformat/rescale """ # Create outdir if needed if not os.path.exists(outdir): os.makedirs(outdir) # Outfile names bremfile = f'{outdir}/{outname}_brem.lhe' decayfile = f'{outdir}/{outname}_decay.lhe' decay_vs = f'{outdir}/{outname}_decay.dat' print( f'Reformatting:\n{lhe}\nInto:\n{bremfile}\n{decayfile}') print( f'And vertices to:\n{decay_vs}') # Creation XYZ Sym = Symbol('q') x_rv = Uniform(Sym, -10 , 10 ) y_rv = Uniform(Sym, -40 , 40 ) #z_rv = Uniform(Sym, -0.175, 0.175) Xs = sample( x_rv, numsamples=nevents, seed=np.random.seed( seed ) ) Ys = sample( y_rv, numsamples=nevents, seed=np.random.seed( seed ) ) #Zs = sample( z_rv, numsamples=nevents, seed=np.random.seed( seed ) ) # Detector limits (pheno paper uses [270, 3200] and 4000 is a hard upperlim) zmin = zlims[0] zmax = zlims[1] # Decay time t = Symbol('t') decay_width = phys_form.gamma_ap_tot(mAp, eps) tau = phys_form.tau(mAp, eps) decay_rv = Exponential(t, 1/tau) decay_t = sample( decay_rv, numsamples=nevents, seed=np.random.seed( seed ) ) # Will store information here nevents_used = 0 # Open original and output files with open(lhe, 'r') as ogfile, \ open(bremfile, 'w') as bremf, \ open(decayfile, 'w') as decayf, \ open(decay_vs, 'w') as decayvs: # Write lims to .dat (plus extra 0s to maintain array shape) decayvs.write( f'{zmin} {zmax} 0 0\n' ) ################################################## # Edit header (technically until </init>) # Many conditions shouldn't check in events sec. ################################################## scaling_mass = False for line in ogfile: # ebeams if re.search(r'ebeam',line): line = phys_form.rescaleLine(line) # Masses if line[:10] == 'BLOCK MASS': scaling_mass = True # Indicate following lines should be scaled continue if line[0] == '#': scaling_mass = False if scaling_mass: line = phys_form.rescaleLine(line, tokens=[1]) # Decay Width if re.match(r'DECAY +622', line): line = phys_form.replaceNums(line, [1], [decay_width]) # Break from header/init if line == '</init>\n': bremf.write(line) decayf.write(line) break bremf.write(line) decayf.write(line) ################################################## # Edit events ################################################## event_num = 0 event_line = 0 current_line = 0 for line in ogfile: # Picks up where last loop leaves off current_line += 1 # Scale relevant lines if line == '<event>\n': event_num += 1 event_line = 0 event_brem_lines = [] event_decay_lines = ['<event>\n'] if event_num % 1000 == 0: print( 'Reformatting event: {}'.format(event_num) ) else: event_line += 1 if 1 < event_line < 9: line = phys_form.rescaleLine(line, tokens=range(6,11)) # Event info line if event_line ==1: # Correct particle number event_brem_lines.append( phys_form.replaceNums(line, [0], [5]) ) event_decay_lines.append(phys_form.replaceNums(line, [0], [2]) ) elif event_line < 7: # If first 5 write to bremfile event_brem_lines.append(line) if event_line == 6: # Take note of Ap info for projection px,py,pz = [ float(v) for v in phys_form.numsInLine(line)[6:9] ] Ap_3mom = np.array((px,py,pz)) elif event_line < 9: # decay electrons # Null parents event_decay_lines.append( phys_form.replaceNums(line, [2,3], [-1,-1]) ) # Skip mgrwt. Add appropriate vertex, and end event elif event_line == 16 : # Prepare vertex samples #x,y,z,t = next(Xs), next(Ys), next(Zs), next(decay_t)*(en/mAp) x,y,z,t = next(Xs), next(Ys), 0, next(decay_t) c_vertex = np.array( (x,y,z) ) d_vertex = c_vertex + Ap_3mom*phys_form.c_speed / mAp * t # If not in allowed z, don't write event if not (zmin < d_vertex[2] < zmax): continue nevents_used += 1 # Else, count event as used # If it is allowed, catch up the writing for ln in event_brem_lines: bremf.write(ln) for ln in event_decay_lines: decayf.write(ln) # Then add the vertices bremf.write( '#vertex {} {} {}\n'.format(x,y,z) ) decayf.write( '#vertex {} {} {} {}\n'.format(*d_vertex,t) ) decayvs.write( '{} {} {} {}\n'.format(*d_vertex,t) ) # And end event bremf.write(line) decayf.write(line) # End both elif line == '</LesHouchesEvents>\n': bremf.write(line) decayf.write(line) print(f'Using {nevents_used} events') return bremfile, decayfile, nevents_used
5,349,245
def bgr_to_rgba(image: Tensor, alpha_val: Union[float, Tensor]) -> Tensor: """Convert an image from BGR to RGBA. Args: image (Tensor[B, 3, H, W]): BGR Image to be converted to RGBA. alpha_val (float, Tensor[B, 1, H, W]): A float number or tensor for the alpha value. Returns: rgba (Tensor[B, 4, H, W]): RGBA version of the image. Notes: Current functionality is NOT supported by Torchscript. """ if not isinstance(alpha_val, (float, Tensor)): raise TypeError(f"`alpha_val` must be a `float` or `Tensor`. " f"But got: {type(alpha_val)}.") # Convert first to RGB, then add alpha channel rgb = bgr_to_rgb(image) rgba = rgb_to_rgba(rgb, alpha_val) return rgba
5,349,246
def d_matrix_1d(n, r, v): """Initializes the differentiation matrices on the interval. Args: n: The order of the polynomial. r: The nodal points. v: The Vandemonde matrix. Returns: The gradient matrix D. """ vr = grad_vandermonde_1d(n, r) return np.linalg.lstsq(v.T, vr.T, rcond=None)[0].T
5,349,247
def compile_replace(pattern, repl, flags=0): """Construct a method that can be used as a replace method for sub, subn, etc.""" call = None if pattern is not None and isinstance(pattern, RE_TYPE): if isinstance(repl, (compat.string_type, compat.binary_type)): repl = ReplaceTemplate(pattern, repl, bool(flags & FORMAT)) call = Replace( functools.partial(_apply_replace_backrefs, repl=repl), repl.use_format, repl.pattern_hash ) elif isinstance(repl, Replace): if flags: raise ValueError("Cannot process flags argument with a compiled pattern!") if repl.pattern_hash != hash(pattern): raise ValueError("Pattern hash doesn't match hash in compiled replace!") call = repl elif isinstance(repl, ReplaceTemplate): if flags: raise ValueError("Cannot process flags argument with a ReplaceTemplate!") call = Replace( functools.partial(_apply_replace_backrefs, repl=repl), repl.use_format, repl.pattern_hash ) else: raise TypeError("Not a valid type!") else: raise TypeError("Pattern must be a compiled regular expression!") return call
5,349,248
def python_2_unicode_compatible(klass): """ From Django A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if six.PY2: # pragma: no cover if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass
5,349,249
def ParseNewPingMsg(msg): """Attempt to parse the message for a ping (in the new format). Return the request and response strings (json-ified dict) if parsing succeeded. Return None otherwise. """ parsed = re.match(kNewPingMsgRe, msg) if not parsed: return None try: return (parsed.group(1), parsed.group(2)) except IndexError as e: logging.warning('RE matched "%s", but extracted wrong numbers of items: %r' % (msg, e)) return None
5,349,250
def parse_main_dict(): """Parses dict to get the lists of countries, cities, and fakers. Fakers allow generation of region specific fake data. Also generates total number of agents """ Faker.seed(seed) # required to generate reproducible data countries = main_dict.keys() cities = [v['city'] for v in main_dict.values()] fakers = [Faker(v['faker_abbrev']) for v in main_dict.values()] total_agents = sum([v['number_of_agents'] for v in main_dict.values()]) return fakers, countries, cities, total_agents
5,349,251
def showMIinstructions(window, miType, holdTime): """Presents instructions for the traditional motor imagery (TMI) response. Parameters ---------- window : obj Psychopy window object. miType : string Type of motor imagery which needs to be prompted Could be one of: m-i - motor imagery l-i - laryngeal imagery m-a - motor movement l-a - laryngeal actuation """ if miType == 'm-i': responseInstr = ["imagine raising your right arm", "rest and wait"] elif miType == 'l-i': responseInstr = ["imagine making a humming sound", "rest and wait"] elif miType == 'm-a': responseInstr = ["raise your right arm", "rest and wait"] elif miType == 'l-a': responseInstr = ["make a humming sound", "rest and wait"] #present a prompt that asks the participant to think about raising their right arm for yes and left arm for no tmiPromptText_1 = f"For YES, {responseInstr[0]} for {str(holdTime)} seconds." tmiPromptStim_1 = visual.TextStim(win=window, text=tmiPromptText_1, pos=(0.5, 0), wrapWidth=0.5) tmiPromptText_2 = f"For NO, {responseInstr[1]} for {str(holdTime)} seconds." tmiPromptStim_2 = visual.TextStim(win=window, text=tmiPromptText_2, pos=(-0.5, 0), wrapWidth=0.5) tmiPromptStim_1.draw() tmiPromptStim_2.draw()
5,349,252
def format_payload(svalue): """formats mqtt payload""" data = {"idx": IDX, "nvalue": 0, "svalue": svalue} return json.dumps(data)
5,349,253
def load_auth_client(): """Create an AuthClient for the portal No credentials are used if the server is not production Returns ------- globus_sdk.ConfidentialAppAuthClient Client used to perform GlobusAuth actions """ _prod = True if _prod: app = globus_sdk.ConfidentialAppAuthClient(GLOBUS_CLIENT, GLOBUS_KEY) else: app = globus_sdk.ConfidentialAppAuthClient('', '') return app
5,349,254
def start_python_console(namespace=None, noipython=False): """Start Python console binded to the given namespace. If IPython is available, an IPython console will be started instead, unless `noipython` is True. Also, tab completion will be used on Unix systems. """ if namespace is None: namespace = {} try: try: # use IPython if available if noipython: raise ImportError import IPython try: IPython.embed(user_ns=namespace) except AttributeError: shell = IPython.Shell.IPShellEmbed(argv=[], user_ns=namespace) shell() except ImportError: import code try: # readline module is only available on unix systems import readline except ImportError: pass else: import rlcompleter readline.parse_and_bind("tab:complete") code.interact(banner='', local=namespace) except SystemExit: # raised when using exit() in python code.interact pass
5,349,255
def test_our_signal_object_method_returns_qobject(optional_name_argument): """qtrio._core.Signal instance provides access to signal-hosting QObject.""" class NotQObject: signal = qtrio.Signal(int, **optional_name_argument) instance = NotQObject() assert isinstance(NotQObject.signal.object(instance=instance), QtCore.QObject)
5,349,256
def DCGAN_discriminator(img_dim, nb_patch, bn_mode, model_name="DCGAN_discriminator", use_mbd=True): """ Discriminator model of the DCGAN args : img_dim (tuple of int) num_chan, height, width pretr_weights_file (str) file holding pre trained weights returns : model (keras NN) the Neural Net model """ list_input = [Input(shape=img_dim, name="disc_input_%s" % i) for i in range(nb_patch)] if K.image_dim_ordering() == "th": bn_axis = 1 else: bn_axis = -1 nb_filters = 64 nb_conv = int(np.floor(np.log(img_dim[1]) / np.log(2))) list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)] # First conv x_input = Input(shape=img_dim, name="discriminator_input") # x = Convolution2D(list_filters[0], 3, 3, subsample=(2, 2), name="disc_conv2d_1", border_mode="same")(x_input) # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x) # x = LeakyReLU(0.2)(x) x = MaxPooling2D( pool_size=(2, 2), strides=(2, 2))(x_input) x = Convolution2D( list_filters[0]/8, 1, 1, activation='relu', init='glorot_uniform', border_mode='same', name='disc_conv2d_1')(x) x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x) e1 = Convolution2D( list_filters[0]/2, 1, 1, activation='relu', init='glorot_uniform', border_mode='same')(x) e2 = Convolution2D( list_filters[0]/2, 3, 3, activation='relu', init='glorot_uniform', border_mode='same')(x) x = merge( [e1, e2], mode='concat', concat_axis=bn_axis) # Next convs for i, f in enumerate(list_filters[1:]): name = "disc_conv2d_fire_%s" % (i + 2) # x = Convolution2D(f, 3, 3, subsample=(2, 2), name=name, border_mode="same")(x) # x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x) # x = LeakyReLU(0.2)(x) x = MaxPooling2D( pool_size=(2, 2), strides=(2, 2))(x) x = Convolution2D( f/8, 1, 1, activation='relu', init='glorot_uniform', border_mode='same', name=name)(x) x = BatchNormalization(mode=bn_mode, axis=bn_axis)(x) e1 = Convolution2D( f/2, 1, 1, activation='relu', init='glorot_uniform', border_mode='same')(x) e2 = Convolution2D( f/2, 3, 3, activation='relu', init='glorot_uniform', border_mode='same')(x) x = merge( [e1, e2], mode='concat', concat_axis=bn_axis) x_flat = Flatten()(x) x = Dense(2, activation='softmax', name="disc_dense")(x_flat) PatchGAN = Model(input=[x_input], output=[x, x_flat], name="PatchGAN") print("PatchGAN summary") PatchGAN.summary() x = [PatchGAN(patch)[0] for patch in list_input] x_mbd = [PatchGAN(patch)[1] for patch in list_input] if len(x) > 1: x = merge(x, mode="concat", name="merge_feat") else: x = x[0] if use_mbd: if len(x_mbd) > 1: x_mbd = merge(x_mbd, mode="concat", name="merge_feat_mbd") else: x_mbd = x_mbd[0] num_kernels = 100 dim_per_kernel = 5 M = Dense(num_kernels * dim_per_kernel, bias=False, activation=None) MBD = Lambda(minb_disc, output_shape=lambda_output) x_mbd = M(x_mbd) x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd) x_mbd = MBD(x_mbd) x = merge([x, x_mbd], mode='concat') x_out = Dense(2, activation="softmax", name="disc_output")(x) discriminator_model = Model(input=list_input, output=[x_out], name=model_name) return discriminator_model
5,349,257
def common_stat_style(): """ The common style for info statistics. Should be used in a dash component className. Returns: (str): The style to be used in className. """ return "has-margin-right-10 has-margin-left-10 has-text-centered has-text-weight-bold"
5,349,258
def TourType_LB_rule(M, t): """ Lower bound on tour type :param M: Model :param t: tour type :return: Constraint rule """ return sum(M.TourType[i, t] for (i, s) in M.okTourType if s == t) >= M.tt_lb[t]
5,349,259
def build_auto_dicts(jsonfile): """Build auto dictionaries from json""" dicts = {} with open(jsonfile, "r") as jsondata: data = json.load(jsondata) for dicti in data: partialstr = data[dicti]["partial"] partial = bool(partialstr == "True") dictlist = data[dicti]["list"] autodict = AuDict(partial) tag = get_tag(dicti) autodict.set_base_tag(tag) for dictdata in dictlist: value = dictdata["value"] applicants = dictdata["applicants"] autodict.add_auto_value(value, applicants) dicts[tag.tag] = autodict return dicts
5,349,260
def print_status(message, status, status_text=None): """ Prints a status message in the form: <message>\t[<status>]. The first part of the status message is any message passed as a parameter. Following this with a tab distance and in boxed brackets is a status message text in a colour based on the specified status: - Status.SUCCESS: - text: "SUCCESS" - colour: BackgroundColours.SUCCESS (green) - Status.ERROR: - text: "FAILURE" - colour: BackgroundColours.ERROR (red) - Status.WARNING: - text: "WARNING" - colour: BackgroundColours.WARNING (yellow) - Unknown/else - text: "UNKNOWN" - colour: BackgroundColours.BLUE (blue) The default message can be overridden by specifying the additional parameter. :param message: str The message. :param status: Status constant The status. :param status_text: str If the value is not None this text overrides the default status message in the boxed brackets. """ if status == Status.SUCCESS: if status_text is None: status_text = "SUCCESS" status_colour = BackgroundColours.SUCCESS elif status == Status.ERROR: if status_text is None: status_text = "FAILURE" status_colour = BackgroundColours.ERROR elif status == Status.WARNING: if status_text is None: status_text = "WARNING" status_colour = BackgroundColours.WARNING else: if status_text is None: status_text = "UNKNOWN" status_colour = BackgroundColours.BLUE status_indicator = message_with_colour(status_text, status_colour) print(message, "\t", f"[{status_indicator}]")
5,349,261
def log_gammainv_pdf(x, a, b): """ log density of the inverse gamma distribution with shape a and scale b, at point x, using Stirling's approximation for a > 100 """ return a * np.log(b) - sp.gammaln(a) - (a + 1) * np.log(x) - b / x
5,349,262
def read_basin() -> gpd.GeoDataFrame: """Read the basin shapefile.""" basin = gpd.read_file(Path(ROOT, "HCDN_nhru_final_671.shp")) basin = basin.to_crs("epsg:4326") basin["hru_id"] = basin.hru_id.astype(str).str.zfill(8) return basin.set_index("hru_id").geometry
5,349,263
def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray, scoring: Callable[[np.ndarray, np.ndarray, ...], float], cv: int = 5) -> Tuple[float, float]: """ Evaluate metric by cross-validation for given estimator Parameters ---------- estimator: BaseEstimator Initialized estimator to use for fitting the data X: ndarray of shape (n_samples, n_features) Input data to fit y: ndarray of shape (n_samples, ) Responses of input data to fit to scoring: Callable[[np.ndarray, np.ndarray, ...], float] Callable to use for evaluating the performance of the cross-validated model. When called, the scoring function receives the true- and predicted values for each sample and potentially additional arguments. The function returns the score for given input. cv: int Specify the number of folds. Returns ------- train_score: float Average train score over folds validation_score: float Average validation score over folds """ # raise NotImplementedError() # k_foldes = KFold(n_splits=cv) # k_foldes.get_n_splits(X) # # for train_index in k_foldes.split(X): # X, y = X[train_index], y[train_index] # m = y.size # shuffled_inds = np.arange(m) # np.random.shuffle(shuffled_inds) # X_shuffled, y_shuffled = X.astype('float64'), y.astype('float64') # kf_X = np.array_split(X_shuffled, 5, axis=0) # kf_y = np.array_split(y_shuffled, 5, axis=0) # kf_X = np.array_split(X, cv, axis=0) # kf_y = np.array_split(y, cv, axis=0) # # # for param in range(k): # what is k? # X_wo_fold = np.concatenate(kf_X[1:]) # y_wo_fold = np.concatenate(kf_y[1:]) # train_scores = [] # validation_score = [] # for fold in range(cv): # cur_fold = kf_X[fold] # cur_fold_y = kf_y[fold] # if len(kf_y[fold+1:]) == 0: # X_wo_fold = np.concatenate(kf_X[:-1]) # y_wo_fold = np.concatenate(kf_y[:-1]) # elif len(kf_X[:fold]) != 0: # X_wo_fold1, X_wo_fold2 = np.concatenate(kf_X[:fold]), np.concatenate(kf_X[fold+1:]) # X_wo_fold = np.concatenate((X_wo_fold1, X_wo_fold2)) # y_wo_fold1, y_wo_fold2 = np.concatenate(kf_y[:fold]), np.concatenate(kf_y[fold+1:]) # y_wo_fold = np.concatenate((y_wo_fold1, y_wo_fold2)) # h_i = estimator.fit(X_wo_fold.flatten(), y_wo_fold) # y_pred_test = h_i.predict(cur_fold.flatten()) # y_pred_train = h_i.predict(X_wo_fold.flatten()) # cur_train_score = scoring(y_wo_fold, y_pred_train) # train_scores.append(cur_train_score) # cur_validation_score = scoring(cur_fold_y, y_pred_test) # validation_score.append(cur_validation_score) # # return np.mean(train_scores), np.mean(validation_score) X = X.flatten() y = y.flatten() kf_X = np.array_split(X, cv, axis=0) kf_y = np.array_split(y, cv, axis=0) # for param in range(k): # what is k? X_wo_fold = np.concatenate(kf_X[1:]) y_wo_fold = np.concatenate(kf_y[1:]) train_scores = [] validation_score = [] for fold in range(cv): cur_fold = kf_X[fold] cur_fold_y = kf_y[fold] if len(kf_y[fold + 1:]) == 0: X_wo_fold = np.concatenate(kf_X[:-1]) y_wo_fold = np.concatenate(kf_y[:-1]) elif len(kf_X[:fold]) != 0: X_wo_fold1, X_wo_fold2 = np.concatenate( kf_X[:fold]), np.concatenate(kf_X[fold + 1:]) X_wo_fold = np.concatenate((X_wo_fold1, X_wo_fold2)) y_wo_fold1, y_wo_fold2 = np.concatenate( kf_y[:fold]), np.concatenate(kf_y[fold + 1:]) y_wo_fold = np.concatenate((y_wo_fold1, y_wo_fold2)) h_i = estimator.fit(X_wo_fold, y_wo_fold) y_pred_test = h_i.predict(cur_fold) y_pred_train = h_i.predict(X_wo_fold) cur_train_score = scoring(y_wo_fold, y_pred_train) train_scores.append(cur_train_score) cur_validation_score = scoring(cur_fold_y, y_pred_test) validation_score.append(cur_validation_score) return np.mean(train_scores), np.mean(validation_score)
5,349,264
def unpack_request(environ, content_length=0): """ Unpacks a get or post request query string. :param environ: whiskey application environment. :return: A dictionary with parameters. """ data = None if environ["REQUEST_METHOD"] == "GET": data = unpack_get(environ) elif environ["REQUEST_METHOD"] == "POST": data = unpack_post(environ, content_length) logger.debug("read request data: %s", data) return data
5,349,265
def GetMembership(name, release_track=None): """Gets a Membership resource from the GKE Hub API. Args: name: the full resource name of the membership to get, e.g., projects/foo/locations/global/memberships/name. release_track: the release_track used in the gcloud command, or None if it is not available. Returns: a Membership resource Raises: apitools.base.py.HttpError: if the request returns an HTTP error """ client = gkehub_api_util.GetApiClientForTrack(release_track) return client.projects_locations_memberships.Get( client.MESSAGES_MODULE.GkehubProjectsLocationsMembershipsGetRequest( name=name))
5,349,266
def receive_messages(queue, max_number, wait_time): """ Receive a batch of messages in a single request from an SQS queue. Usage is shown in usage_demo at the end of this module. :param queue: The queue from which to receive messages. :param max_number: The maximum number of messages to receive. The actual number of messages received might be less. :param wait_time: The maximum time to wait (in seconds) before returning. When this number is greater than zero, long polling is used. This can result in reduced costs and fewer false empty responses. :return: The list of Message objects received. These each contain the body of the message and metadata and custom attributes. """ try: messages = queue.receive_messages( MessageAttributeNames=['All'], MaxNumberOfMessages=max_number, WaitTimeSeconds=wait_time ) for msg in messages: logger.info("Received message: %s: %s", msg.message_id, msg.body) request = extract_request(msg.message_attributes) recommendations = get_recommendations(request) send_to_sns(request, recommendations) except ClientError as error: logger.exception("Couldn't receive messages from queue: %s", queue) raise error else: return messages
5,349,267
def get_snmp_community(device, find_filter=None):
    """Retrieves snmp community settings for a given device
    Args:
        device (Device): This is the device object of an NX-API enabled device
            using the Device class
        find_filter (str): optional arg to return only this specific community
    Returns:
        dictionary
    """
    command = 'show snmp community'
    data = device.show(command)
    data_dict = xmltodict.parse(data[1])
    c_dict = {}
    try:
        comm_table = data_dict['ins_api']['outputs']['output']['body'].get(
            'TABLE_snmp_community')['ROW_snmp_community']
        for each in comm_table:
            community = {}
            key = str(each['community_name'])
            community['group'] = str(each['grouporaccess'])
            community['acl'] = str(each['aclfilter'])
            c_dict[key] = community
    except TypeError:
        # A single community is returned as a dict rather than a list of rows
        community = {}
        key = str(comm_table['community_name'])
        community['group'] = str(comm_table['grouporaccess'])
        community['acl'] = str(comm_table['aclfilter'])
        c_dict[key] = community
    except (KeyError, AttributeError):
        return c_dict
    if find_filter:
        find = c_dict.get(find_filter, None)
        return find if find is not None else {}
    return c_dict
5,349,268
def create_input_chunks(cs, partition, data_dir, file_format):
    """
    cs: chunk shape
    partition: number of chunks along each dimension
    data_dir: directory in which to store the chunk files
    file_format: file format
    """
    if file_format == "HDF5":
        file_manager = HDF5_manager()
    else:
        print("File format not supported yet. Aborting...")
        sys.exit(1)

    print(f"Creating input chunks at {data_dir}")
    create_empty_dir(data_dir)
    for i in range(partition[0]):
        for j in range(partition[1]):
            for k in range(partition[2]):
                print(f"Creating random array... shape: {cs}")
                arr = da.random.uniform(size=cs)
                print(f"Done, converting to float16...")
                arr = arr.astype(np.float16)
                out_filename = f'{i}_{j}_{k}.hdf5'
                print(f"Building {out_filename} with shape {cs}")
                outfilepath = os.path.join(data_dir, out_filename)
                print(f"Storing...")
                da.to_hdf5(outfilepath, '/data', arr, chunks=None, compression=None)
5,349,269
def get_header_size(tif): """ Gets the header size of a GeoTIFF file in bytes. The code used in this function and its helper function `_get_block_offset` were extracted from the following source: https://github.com/OSGeo/gdal/blob/master/swig/python/gdal-utils/osgeo_utils/samples/validate_cloud_optimized_geotiff.py Copyright (c) 2017, Even Rouault Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. Parameters ---------- tif: str A path to a GeoTIFF file of the currently processed NRB product. Returns ------- header_size: int The size of all IFD headers of the GeoTIFF file in bytes. """ def _get_block_offset(band): blockxsize, blockysize = band.GetBlockSize() for y in range(int((band.YSize + blockysize - 1) / blockysize)): for x in range(int((band.XSize + blockxsize - 1) / blockxsize)): block_offset = band.GetMetadataItem('BLOCK_OFFSET_%d_%d' % (x, y), 'TIFF') if block_offset: return int(block_offset) return 0 details = {} ds = gdal.Open(tif) main_band = ds.GetRasterBand(1) ovr_count = main_band.GetOverviewCount() block_offset = _get_block_offset(band=main_band) details['data_offsets'] = {} details['data_offsets']['main'] = block_offset for i in range(ovr_count): ovr_band = ds.GetRasterBand(1).GetOverview(i) block_offset = _get_block_offset(band=ovr_band) details['data_offsets']['overview_%d' % i] = block_offset headers_size = min(details['data_offsets'][k] for k in details['data_offsets']) if headers_size == 0: headers_size = gdal.VSIStatL(tif).size return headers_size
5,349,270
def format_x_ticks_as_dates(plot): """Formats x ticks YYYY-MM-DD and removes the default 'Date' label. Args: plot: matplotlib.AxesSubplot object. """ plot.xaxis.set_major_formatter(mpl.dates.DateFormatter('%Y-%m-%d')) plot.get_xaxis().get_label().set_visible(False) return plot
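# A quick usage sketch (hypothetical plot), assuming the module-level matplotlib import `mpl`
# used by format_x_ticks_as_dates; pandas is only used here to build example dates.
import pandas as pd
import matplotlib.pyplot as plt

_fig, _ax = plt.subplots()
_ax.plot(pd.date_range("2021-01-01", periods=30), range(30))
format_x_ticks_as_dates(_ax)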
5,349,271
def operations(): """Gets the base class for the operations class. We have to use the configured base back-end's operations class for this. """ return base_backend_instance().ops.__class__
5,349,272
def send_data_without_firstname(page_new_contact, the_faker) -> None: """I send the data without the firstname.""" page_new_contact.contact = ContactFakerFactory(_faker=the_faker).initialize(config={}) p_action = ContactWriteAction(_page=page_new_contact) p_action.fill_lastname().fill_phone() del p_action p_action = ContactCreateAction(_page=page_new_contact) p_action.click()
5,349,273
def office_convert_get_page(request, repo_id, commit_id, path, filename):
    """Valid static file path includes:
    - index.html for spreadsheets and index_html_xxx.png for images embedded in spreadsheets
    - 77e168722458356507a1f373714aa9b575491f09.pdf
    """
    if not HAS_OFFICE_CONVERTER:
        raise Http404
    if not _OFFICE_PAGE_PATTERN.match(filename):
        return HttpResponseForbidden()
    path = '/' + path
    file_id = _office_convert_get_file_id(request, repo_id, commit_id, path)
    if filename.endswith('.pdf'):
        filename = "{0}.pdf".format(file_id)
    if CLUSTER_MODE:
        resp = cluster_get_office_converted_page(path, filename, file_id)
    else:
        resp = get_office_converted_page(request, filename, file_id)
    if filename.endswith('.page'):
        content_type = 'text/html'
    else:
        content_type = mimetypes.guess_type(filename)[0] or 'text/html'
    resp['Content-Type'] = content_type
    return resp
5,349,274
def combine_res_work_dcfc_lcoc(res_lcoc_file = 'outputs/cost-of-charging/residential/res_states_baseline.csv',
                               wrk_lcoc_file = 'outputs/cost-of-charging/workplace-public-l2/work_pub_l2_states_baseline.csv',
                               dcfc_lcoc_file = 'outputs/cost-of-charging/dcfc/dcfc_states_baseline.csv',
                               res_wgt = 0.81,
                               wrk_wgt = 0.14,
                               dcfc_wgt = 0.05,
                               outfile = 'outputs/cost-of-charging/comb/comb_states_baseline.csv'):
    """
    Combines the LCOC for the three EV charging sites by the weights res_wgt, wrk_wgt,
    dcfc_wgt and writes the result to outfile.
    """
    # Use a tolerance rather than exact float equality to avoid floating-point issues
    assert abs(res_wgt + wrk_wgt + dcfc_wgt - 1.0) < 1e-9, "Sum of weights must equal 1.0!"

    # Load data, standardize
    res_df = pd.read_csv(res_lcoc_file)
    res_df.rename(columns={'lcoc_cost_per_kwh': 'res_lcoc'}, inplace=True)
    wrk_df = pd.read_csv(wrk_lcoc_file)
    wrk_df.rename(columns={'lcoc_cost_per_kwh': 'wrk_lcoc'}, inplace=True)
    dcfc_df = pd.read_csv(dcfc_lcoc_file)
    dcfc_df.rename(columns={'State': 'state', 'comb_lcoc': 'dcfc_lcoc'}, inplace=True)
    dcfc_df = dcfc_df[['state', 'dcfc_lcoc']]

    # Merge datasets
    comb_df = res_df.merge(wrk_df, how='inner', on='state')
    comb_df = comb_df.merge(dcfc_df, how='inner', on='state')

    # Calculate comb LCOC, write to file
    comb_df['lcoc_cost_per_kwh'] = comb_df['res_lcoc'] * res_wgt + comb_df['wrk_lcoc'] * wrk_wgt + comb_df['dcfc_lcoc'] * dcfc_wgt
    comb_df = comb_df[['state', 'lcoc_cost_per_kwh']]
    comb_df.to_csv(outfile, index=False)
    nat_lcoc = round(float(comb_df[comb_df.state=='US']['lcoc_cost_per_kwh']), 2)
    print("Combined LCOC calculation complete, national LCOC is ${}/kWh".format(nat_lcoc))
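# Worked sanity check of the default weighting with hypothetical per-site LCOC values ($/kWh);
# the residential rate dominates the combined levelized cost of charging.
_res, _wrk, _dcfc = 0.13, 0.11, 0.27
_combined = 0.81 * _res + 0.14 * _wrk + 0.05 * _dcfc  # = 0.1053 + 0.0154 + 0.0135 = 0.1342 $/kWh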
5,349,275
def tensorize_data( uvdata, corr_inds, ants_map, polarization, time, data_scale_factor=1.0, weights=None, nsamples_in_weights=False, dtype=np.float32, ): """Convert data in uvdata object to a tensor Parameters ---------- uvdata: UVData object UVData object containing data, flags, and nsamples to tensorize. corr_inds: list list of list of lists of 2-tuples. Hierarchy of lists is chunk group baseline - (int 2-tuple) ants_map: dict mapping integers to integers map between each antenna number to a unique index between 0 and Nants_data (typically the index of each antenna in ants_map) polarization: str pol-str of gain to extract. time: float time of data to convert to tensor. data_scale_factor: float, optional overall scaling factor to divide tensorized data by. default is 1.0 weights: UVFlag object, optional UVFlag weights object containing weights to use for data fitting. default is None -> use nsamples * ~flags if nsamples_in_weights or ~flags if not nsamples_in_weights nsamples_in_weights: bool, optional If True and weights is None, generate weights proportional to nsamples. default is False. dtype: numpy.dtype data-type to store in tensor. default is np.float32 Returns ------- data_r: list of tf.Tensor objects list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs) where ngrps, nbls are the dimensions of each sublist in corr_inds and contain the real components of the baselines specified by these 2-tuples. data_i: list of tf.Tensor objects list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs) where ngrps, nbls are the dimensions of each sublist in corr_inds and contain the imag components of the baselines specified by these 2-tuples. wgts: tf.Tensor object list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs) where ngrps, nbls are the dimensions of each sublist in corr_inds and contain the weights of the baselines specified by these 2-tuples. 
""" ants_map_inv = {ants_map[i]: i for i in ants_map} dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs) data_r = np.zeros(dshape, dtype=dtype) data_i = np.zeros_like(data_r) wgts = np.zeros_like(data_r) wgtsum = 0.0 for chunk in corr_inds: for fitgrp in chunk: for (i, j) in fitgrp: ap = ants_map_inv[i], ants_map_inv[j] bl = ap + (polarization,) dinds1, dinds2, pol_ind = uvdata._key2inds(bl) if len(dinds1) > 0: dinds = dinds1 conjugate = False pol_ind = pol_ind[0] else: dinds = dinds2 conjugate = True pol_ind = pol_ind[1] dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]] data = uvdata.data_array[dind, 0, :, pol_ind].squeeze() iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze() nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze() data /= data_scale_factor if conjugate: data = np.conj(data) data_r[i, j] = data.real.astype(dtype) data_i[i, j] = data.imag.astype(dtype) if weights is None: wgts[i, j] = iflags if nsamples_in_weights: wgts[i, j] *= nsamples else: if ap in weights.get_antpairs(): dinds = weights.antpair2ind(*ap) else: dinds = weights.antpair2ind(*ap[::-1]) dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]] polnum = np.where( weights.polarization_array == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation) )[0][0] wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags if nsamples_in_weights: wgts[i, j] *= nsamples wgtsum += np.sum(wgts[i, j]) data_r = tf.convert_to_tensor(data_r, dtype=dtype) data_i = tf.convert_to_tensor(data_i, dtype=dtype) wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype) nchunks = len(corr_inds) data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)] data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)] wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)] return data_r, data_i, wgts
5,349,276
def _normalize_rows(t, softmax=False):
    """
    Normalizes the rows of a tensor either using a softmax or just plain division by row sums
    Args:
        t (:obj:`batch_like`)
    Returns:
        Normalized version of t where rows sum to 1
    """
    if not softmax:
        # EPSILON hack avoids occasional NaNs when a row sums to zero
        row_sums = torch.sum(t, len(t.size())-1, keepdim=True) + EPSILON
        return torch.div(t, row_sums.expand_as(t))
    else:
        # Softmax over the last dimension of the 2D flattened view
        s = nn.Softmax(dim=1)
        return s(t.view(-1, t.size(len(t.size())-1))).view(t.size())
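# A minimal usage sketch (hypothetical 2x3 tensor); both modes return rows that sum to 1.
import torch
_t_demo = torch.tensor([[1.0, 1.0, 2.0], [0.0, 0.0, 4.0]])
_by_sum = _normalize_rows(_t_demo)                   # ~[[0.25, 0.25, 0.50], [0.00, 0.00, 1.00]]
_by_softmax = _normalize_rows(_t_demo, softmax=True)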
5,349,277
def calculate_baselines(baselines: pd.DataFrame) -> dict: """ Read a file that contains multiple runs of the same pair. The format of the file must be: workload id, workload argument, run number, tFC, tVM This function calculates the average over all runs of each unique pair of workload id and workload argument. """ if type(baselines) is not pd.DataFrame: raise TypeError("calculate_baselines: invalid object type passed.") processed_baselines = {} distinct_workloads = baselines[COLUMN_WORKLOAD].unique() for workload in distinct_workloads: # Filter for current workload workload_baseline = baselines.loc[baselines[COLUMN_WORKLOAD] == workload] # Get all the arguments workload_arguments = workload_baseline[COLUMN_ARGUMENT].unique() if workload not in processed_baselines: processed_baselines[workload] = {} for argument in workload_arguments: workload_argument_baseline = workload_baseline.loc[ workload_baseline[COLUMN_ARGUMENT] == argument] # Calculate the means of the timings for the workload-argument pair tVM = round(workload_argument_baseline[COLUMN_TIMEVM].mean()) tFC = round(workload_argument_baseline[COLUMN_TIMEFC].mean()) processed_baselines[workload][argument] = [tFC, tVM] return processed_baselines
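# A minimal usage sketch with two runs of one workload/argument pair; the COLUMN_* names are
# the module-level column constants that calculate_baselines reads (hypothetical timings).
_demo_runs = pd.DataFrame({
    COLUMN_WORKLOAD: ["sort", "sort"],
    COLUMN_ARGUMENT: ["1MB", "1MB"],
    COLUMN_TIMEFC: [100, 110],
    COLUMN_TIMEVM: [90, 92],
})
calculate_baselines(_demo_runs)  # -> {"sort": {"1MB": [105, 91]}}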
5,349,278
def pick_ind(x, minmax): """ Return indices between minmax[0] and minmax[1]. Args: x : Input vector minmax : Minimum and maximum values Returns: indices """ return (x >= minmax[0]) & (x <= minmax[1])
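# A quick usage sketch: boolean mask keeping values inside the closed interval [2, 5].
import numpy as np
_x_demo = np.array([1, 2, 3, 6, 4])
_mask = pick_ind(_x_demo, (2, 5))   # array([False, True, True, False, True])
_inside = _x_demo[_mask]            # array([2, 3, 4])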
5,349,279
def removedir(dirpath): """Remove dir if it does exist and is a dir.""" if exists(dirpath) and isdir(dirpath): shutil.rmtree(dirpath)
5,349,280
def read_files(file_prefix,start=0,end=100,nfmt=3,pixel_map=None): """ read files that have a numerical suffix """ images = [] format = '%' + str(nfmt) + '.' + str(nfmt) + 'd' for j in range(start,end+1): ext = format % j file = file_prefix + '_' + ext + '.tif' arr = read(file,pixel_map=pixel_map) images.append(arr) return images
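# A quick illustration of the numeric suffix (hypothetical prefix): nfmt=3 zero-pads the index,
# so read_files('scan', start=0, end=2) would read scan_000.tif, scan_001.tif and scan_002.tif.
assert ('%3.3d' % 7) == '007'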
5,349,281
def check_geometry(geometry): """ Checks if a node is valid geometry node and raise and exception if the node is not valid :param geometry: str, name of the node to be checked :return: bool, True if the give node is a geometry node """ if not is_geometry(geometry): raise exceptions.GeometryException(geometry)
5,349,282
def test_table_exists(db: DataBaseConn, create_table): """Test return True if table exists.""" assert db.table_exists("test")
5,349,283
def describing_function( F, A, num_points=100, zero_check=True, try_method=True): """Numerical compute the describing function of a nonlinear function The describing function of a nonlinearity is given by magnitude and phase of the first harmonic of the function when evaluated along a sinusoidal input :math:`A \\sin \\omega t`. This function returns the magnitude and phase of the describing function at amplitude :math:`A`. Parameters ---------- F : callable The function F() should accept a scalar number as an argument and return a scalar number. For compatibility with (static) nonlinear input/output systems, the output can also return a 1D array with a single element. If the function is an object with a method `describing_function` then this method will be used to computing the describing function instead of a nonlinear computation. Some common nonlinearities use the :class:`~control.DescribingFunctionNonlinearity` class, which provides this functionality. A : array_like The amplitude(s) at which the describing function should be calculated. zero_check : bool, optional If `True` (default) then `A` is zero, the function will be evaluated and checked to make sure it is zero. If not, a `TypeError` exception is raised. If zero_check is `False`, no check is made on the value of the function at zero. try_method : bool, optional If `True` (default), check the `F` argument to see if it is an object with a `describing_function` method and use this to compute the describing function. More information in the `describing_function` method for the :class:`~control.DescribingFunctionNonlinearity` class. Returns ------- df : array of complex The (complex) value of the describing function at the given amplitudes. Raises ------ TypeError If A[i] < 0 or if A[i] = 0 and the function F(0) is non-zero. """ # If there is an analytical solution, trying using that first if try_method and hasattr(F, 'describing_function'): try: return np.vectorize(F.describing_function, otypes=[complex])(A) except NotImplementedError: # Drop through and do the numerical computation pass # # The describing function of a nonlinear function F() can be computed by # evaluating the nonlinearity over a sinusoid. The Fourier series for a # static nonlinear function evaluated on a sinusoid can be written as # # F(A\sin\omega t) = \sum_{k=1}^\infty M_k(A) \sin(k\omega t + \phi_k(A)) # # The describing function is given by the complex number # # N(A) = M_1(A) e^{j \phi_1(A)} / A # # To compute this, we compute F(A \sin\theta) for \theta between 0 and 2 # \pi, use the identities # # \sin(\theta + \phi) = \sin\theta \cos\phi + \cos\theta \sin\phi # \int_0^{2\pi} \sin^2 \theta d\theta = \pi # \int_0^{2\pi} \cos^2 \theta d\theta = \pi # # and then integrate the product against \sin\theta and \cos\theta to obtain # # \int_0^{2\pi} F(A\sin\theta) \sin\theta d\theta = M_1 \pi \cos\phi # \int_0^{2\pi} F(A\sin\theta) \cos\theta d\theta = M_1 \pi \sin\phi # # From these we can compute M1 and \phi. 
# # Evaluate over a full range of angles (leave off endpoint a la DFT) theta, dtheta = np.linspace( 0, 2*np.pi, num_points, endpoint=False, retstep=True) sin_theta = np.sin(theta) cos_theta = np.cos(theta) # See if this is a static nonlinearity (assume not, just in case) if not hasattr(F, '_isstatic') or not F._isstatic(): # Initialize any internal state by going through an initial cycle for x in np.atleast_1d(A).min() * sin_theta: F(x) # ignore the result # Go through all of the amplitudes we were given retdf = np.empty(np.shape(A), dtype=complex) df = retdf # Access to the return array df.shape = (-1, ) # as a 1D array for i, a in enumerate(np.atleast_1d(A)): # Make sure we got a valid argument if a == 0: # Check to make sure the function has zero output with zero input if zero_check and np.squeeze(F(0.)) != 0: raise ValueError("function must evaluate to zero at zero") df[i] = 1. continue elif a < 0: raise ValueError("cannot evaluate describing function for A < 0") # Save the scaling factor to make the formulas simpler scale = dtheta / np.pi / a # Evaluate the function along a sinusoid F_eval = np.array([F(x) for x in a*sin_theta]).squeeze() # Compute the prjections onto sine and cosine df_real = (F_eval @ sin_theta) * scale # = M_1 \cos\phi / a df_imag = (F_eval @ cos_theta) * scale # = M_1 \sin\phi / a df[i] = df_real + 1j * df_imag # Return the values in the same shape as they were requested return retdf
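# A small numerical sanity check with a hypothetical cubic nonlinearity: for F(x) = x**3 the
# analytic describing function is 3*A**2/4, which the numerical routine should closely reproduce.
_amps = np.array([0.5, 1.0, 2.0])
_df_vals = describing_function(lambda x: x**3, _amps)
assert np.allclose(_df_vals.real, 3 * _amps**2 / 4, rtol=1e-2)
assert np.allclose(_df_vals.imag, 0, atol=1e-6)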
5,349,284
def _read_point(asset: str, *args, **kwargs) -> List: """Read pixel value at a point from an asset""" with COGReader(asset) as cog: return cog.point(*args, **kwargs)
5,349,285
def test_copy(): """Ensure that the copy method makes a proper copy of the fermentable.""" recipe = RecipeStub() original = Fermentable( recipe=recipe, name='Test', amount=MassType(1, 'lb'), ftype='Grain', group='Smoked', producer='Crisp', origin='UK', fyield=PercentType(68, '%'), color=ColorType(45, 'SRM'), moisture=PercentType(3, '%'), diastaticPower=DiastaticPowerType(4, 'Lintner'), addAfterBoil=False, mashed=True, notes='A note', phi=5.6, bi=43.2 ) newRecipe = RecipeStub() copy = original.copy(newRecipe) assert isinstance(copy, Fermentable) assert copy.recipe == newRecipe assert copy.name == 'Test' assert isinstance(copy.amount, MassType) assert copy.amount is not original.amount # Should be a new instance of MassType. assert copy.amount.lb == 1 assert copy.ftype == 'Grain' assert copy.group == 'Smoked' assert copy.producer == 'Crisp' assert copy.origin == 'UK' assert copy.fyield is not original.fyield assert copy.fyield.percent == 68 assert copy.color is not original.color assert copy.color.SRM == 45 assert copy.moisture is not original.moisture assert copy.moisture.percent == 3 assert copy.diastaticPower is not original.diastaticPower assert copy.diastaticPower.Lintner == 4 assert copy.addAfterBoil is not None assert not copy.addAfterBoil assert copy.mashed is not None assert copy.mashed assert copy.notes == 'A note' assert copy.phi == 5.6 assert copy.bi == 43.2
5,349,286
def get_unquoted_text(token): """ :param token: Token :return: String """ if isinstance(token, UnquotedText): return token.value() else: raise exceptions.BugOrBroken( "tried to get unquoted text from " + token)
5,349,287
def image2tensor(image: np.ndarray, range_norm: bool, half: bool) -> torch.Tensor: """Convert ``PIL.Image`` to Tensor. Args: image (np.ndarray): The image data read by ``PIL.Image`` range_norm (bool): Scale [0, 1] data to between [-1, 1] half (bool): Whether to convert torch.float32 similarly to torch.half type. Returns: Normalized image data Examples: >>> image = cv2.imread("image.bmp", cv2.IMREAD_UNCHANGED).astype(np.float32) / 255. >>> tensor_image = image2tensor(image, range_norm=False, half=False) """ tensor = F.to_tensor(image) if range_norm: tensor = tensor.mul_(2.0).sub_(1.0) if half: tensor = tensor.half() return tensor
5,349,288
def panda_four_load_branch():
    """
    This function creates a simple six bus system with four radial low voltage nodes connected to \
    a medium voltage slack bus. At every low voltage node the same load is connected.

    RETURN:
         **net** - Returns the required four load system

    EXAMPLE:
         import pandapower.networks as pn

         net_four_load = pn.panda_four_load_branch()
    """
    pd_net = pp.create_empty_network()

    busnr1 = pp.create_bus(pd_net, name="bus1", vn_kv=10.)
    busnr2 = pp.create_bus(pd_net, name="bus2", vn_kv=.4)
    busnr3 = pp.create_bus(pd_net, name="bus3", vn_kv=.4)
    busnr4 = pp.create_bus(pd_net, name="bus4", vn_kv=.4)
    busnr5 = pp.create_bus(pd_net, name="bus5", vn_kv=.4)
    busnr6 = pp.create_bus(pd_net, name="bus6", vn_kv=.4)

    pp.create_ext_grid(pd_net, busnr1)

    pp.create_transformer(pd_net, busnr1, busnr2, std_type="0.25 MVA 10/0.4 kV")

    pp.create_line(pd_net, busnr2, busnr3, name="line1", length_km=0.05, std_type="NAYY 4x120 SE")
    pp.create_line(pd_net, busnr3, busnr4, name="line2", length_km=0.05, std_type="NAYY 4x120 SE")
    pp.create_line(pd_net, busnr4, busnr5, name="line3", length_km=0.05, std_type="NAYY 4x120 SE")
    pp.create_line(pd_net, busnr5, busnr6, name="line4", length_km=0.05, std_type="NAYY 4x120 SE")

    pp.create_load(pd_net, busnr3, 30, 10)
    pp.create_load(pd_net, busnr4, 30, 10)
    pp.create_load(pd_net, busnr5, 30, 10)
    pp.create_load(pd_net, busnr6, 30, 10)

    return pd_net
5,349,289
def word_flipper(our_string): """ Flip the individual words in a sentence Args: our_string(string): Strings to have individual words flip Returns: string: String with words flipped """ word_list = our_string.split(" ") for idx in range(len(word_list)): word_list[idx] = word_list[idx][::-1] # [index1:index2:step] return " ".join(word_list)
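# Quick check of the word-reversal behaviour on a hypothetical sentence:
assert word_flipper("hello world") == "olleh dlrow"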
5,349,290
def load_json(ctx, param, value): """Decode and load json for click option.""" value = value[1:] return json.loads(base64.standard_b64decode(value).decode())
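# Illustration of the expected encoding (hypothetical payload): the leading sentinel character is
# stripped before base64-decoding, so a value can be built as any one-char prefix + base64(json).
import base64 as _b64
import json as _json
_payload = "=" + _b64.standard_b64encode(_json.dumps({"a": 1}).encode()).decode()
assert load_json(None, None, _payload) == {"a": 1}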
5,349,291
def hyperparam_search(model_config, train, test): """Perform hyperparameter search using Bayesian optimization on a given model and dataset. Args: model_config (dict): the model and the parameter ranges to search in. Format: { "name": str, "model": sklearn.base.BaseEstimator, "params": dict } train (pandas.DataFrame): training data test (pandas.DataFrame): test data """ X_train = train.drop("label", axis=1) y_train = train.label X_test = test.drop("label", axis=1) y_test = test.label opt = BayesSearchCV( model_config["model"], model_config["params"], n_jobs=4, cv=5, random_state=RANDOM_SEED, ) opt.fit(X_train, y_train) acc = opt.score(X_test, y_test) print(f"{model_config['name']} results:") print(f"Best validation accuracy: {opt.best_score_}") print(f"Test set accuracy: {acc}") print(f"Best parameters:") for param, value in opt.best_params_.items(): print(f"- {param}: {value}") return { "name": model_config["name"], "class": model_config["class"], "model": opt.best_estimator_, "params": opt.best_params_, "score": acc, }
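# A sketch of the expected model_config shape (hypothetical model and search ranges); note that
# the function also reads a "class" key when assembling its return value.
from sklearn.linear_model import LogisticRegression

_demo_config = {
    "name": "logreg",
    "class": LogisticRegression,
    "model": LogisticRegression(max_iter=1000),
    "params": {"C": (1e-3, 1e3, "log-uniform")},
}
# result = hyperparam_search(_demo_config, train_df, test_df)  # both frames need a 'label' column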
5,349,292
def sendNotification(email, asset, dom_ids, cve_ids):
    """Send email notification about new scan results."""
    sender = settings.EMAIL_HOST_USER
    cves = [v['name'] for v in VulnInstance.objects.filter(id__in=cve_ids).values('name')]
    doms = [d['fqdn'] for d in DomainInstance.objects.filter(id__in=dom_ids).values('fqdn')]
    changes = []
    if len(doms) == 1:
        changes.append("domain")
    elif len(doms) > 1:
        changes.append("domains")
    if len(cves) == 1:
        changes.append("vulnerability")
    elif len(cves) > 1:
        changes.append("vulnerabilities")
    subject = f"[Pulsar] New results for {asset}"
    body = f"<html><body><h2>New {' and '.join(changes)} spotted on {asset}</h2>\n<hr />"
    body += "<h2>The last scan has identified:</h2>\n"
    for change in changes:
        if 'dom' in change:
            body += f"\n<h3><p><strong>{str(len(doms))}</strong> new {change}:</p></h3>\n"
            body += '<table>'
            body += '<tbody>'
            for dom in doms:
                body += '<tr>'
                body += f"<td><big>{dom}</big></td>\n"
                body += '</tr>'
            body += '</tbody>'
            body += '</table>\n'
        elif 'vuln' in change:
            body += f"<h3><p><strong>{str(len(cves))}</strong> new {change}:</p></h3>\n"
            body += '<table>'
            body += '<tbody>'
            for cve in cves:
                body += '<tr>'
                body += f"<td><big>{cve}</big></td>\n"
                body += '</tr>'
            body += '</tbody>'
            body += '</table>\n'
    body += '<p><em><span style="color: #808080;">'
    body += 'All findings are prone to <strong>false-positives</strong>. '
    body += 'Please log in to OpenOSINT dashboard to verify and mark them as such if needed. '
    body += 'All marked items will be omitted in future scans.</span>\n</em></p></body></html>'
    plain = strip_tags(body)
    send_mail(subject, plain, sender, [email, ], html_message=body, fail_silently=False)
5,349,293
def profile_tags(profile): """ Get the tags from a given security profile. """ # TODO: This is going to be a no-op now, so consider removing it. return profile.id.split('_')
5,349,294
def _sawtooth_wave_samples(freq, rate, amp, num):
    """
    Generates a set of audio samples taken at the given sampling rate representing a sawtooth
    wave oscillating at the given frequency with the given amplitude for the given number of samples.
    :param float freq The frequency of oscillation of the sawtooth wave
    :param int rate The sampling rate
    :param float amp The amplitude of the sawtooth wave
    :param int num The number of samples to generate.
    :return List[float] The audio samples representing the signal as described above.
    """
    return [utils._sawtooth_sample(amp, freq, rate, i) for i in range(num)]
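# A minimal usage sketch: one second of a 440 Hz sawtooth at 44.1 kHz with unit amplitude
# (relies on the module-level `utils._sawtooth_sample` helper assumed by this function).
_samples = _sawtooth_wave_samples(freq=440.0, rate=44100, amp=1.0, num=44100)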
5,349,295
def ResNet_UNet_Dropout(dim=512, num_classes=6, dropout=0.5, final_activation=True):
    """
    Returns a ResNet50 Network with a U-Net like upsampling stage.
    Includes skip connections from previous ResNet50 layers.
    Uses SpatialDropout2D on the final layer as introduced in
    https://arxiv.org/pdf/1411.4280.pdf, 2015.
    Input:
        dim - the size of the input image. Note that it should be a power of 2
              so that downsampling and upsampling always match.
              ie. 128 -> 64 -> 32 -> 64 -> 128
              This is only needed for training.
        num_classes - the number of classes in the whole problem. Used to determine
              the dimension of output map. i.e. model.predict() returns array that
              can be reshaped to (dim, dim, num_classes).
    Output:
        model - an uncompiled keras model. Check output shape before use.
    """
    from keras.models import Model
    from keras.layers import Conv2D, SpatialDropout2D
    from keras.layers import UpSampling2D, Reshape, concatenate
    from keras.applications.resnet50 import ResNet50

    # Import a headless ResNet50
    resnet = ResNet50(input_shape = (None, None, 3), include_top=False)

    # Attached U-net from second last layer - activation_49
    res_out = resnet.layers[-2].output

    # Standard U-Net upsampling 512 -> 256 -> 128 -> 64

    # Upsampling 1 - 512
    fs = 32
    up1 = UpSampling2D(size=(2,2))(res_out)
    up1_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up1)
    prev_layer = resnet.get_layer("activation_40").output
    merge1 = concatenate([prev_layer,up1_conv], axis = 3)
    merge1_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1)
    merge1_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge1_conv1)

    # Upsampling 2 - 256
    fs = 32
    up2 = UpSampling2D(size = (2,2))(merge1_conv2)
    up2_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up2)
    prev_layer = resnet.get_layer("activation_22").output
    merge2 = concatenate([prev_layer,up2_conv], axis = 3)
    merge2_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge2)
    merge2_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge2_conv1)

    # Upsampling 3 & 4 - 128
    fs = 32
    up3 = UpSampling2D(size = (2,2))(merge2_conv2)
    up3_conv1 = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up3)
    up3_conv2 = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up3_conv1)
    up4 = UpSampling2D(size = (2,2))(up3_conv2)
    up4_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up4)
    prev_layer = resnet.get_layer("activation_1").output
    merge3 = concatenate([prev_layer,up4_conv], axis = 3)
    merge3_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge3)
    merge3_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge3_conv1)

    # Upsample 5 - 64
    fs = 32
    up5 = UpSampling2D(size=(2,2))(merge3_conv2)
    up5_conv = Conv2D(fs, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up5)
    merge5_conv1 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(up5_conv)
    merge5_conv2 = Conv2D(fs, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge5_conv1)

    # Drop Out
    do = SpatialDropout2D(dropout)(merge5_conv2)

    # Activation and reshape for training
    if final_activation:
        activation = Conv2D(num_classes, 1, 
activation="softmax")(do) else: activation = Conv2D(num_classes, 1, activation=None)(do) output = Reshape((dim*dim, num_classes))(activation) # Build model model = Model(inputs=[resnet.input], outputs=[output]) return model
5,349,296
def test_bucket_names(sdc_builder, sdc_executor, gcp, test_name, bucket_name): """ Write data to Google cloud storage with different valid bucket names. The pipeline looks like: google_cloud_storage_origin >> wiretap """ pipeline_builder = sdc_builder.get_pipeline_builder() storage_client = gcp.storage_client google_cloud_storage = pipeline_builder.add_stage('Google Cloud Storage', type='origin') google_cloud_storage.set_attributes(bucket=bucket_name, common_prefix='gcs-test', prefix_pattern='**/*.txt', data_format='TEXT') wiretap = pipeline_builder.add_wiretap() google_cloud_storage >> wiretap.destination pipeline = pipeline_builder.build(title=f'Google Cloud Storage Origin Bucket Names {test_name}').configure_for_environment(gcp) sdc_executor.add_pipeline(pipeline) try: created_bucket = gcp.retry_429(storage_client.create_bucket)(bucket_name) data = [get_random_string(string.ascii_letters, 100) for _ in range(10)] blob = created_bucket.blob('gcs-test/sdc-test.txt') blob.upload_from_string('\n'.join(data)) logger.info('Starting GCS Origin pipeline and wait until the information is read ...') sdc_executor.start_pipeline(pipeline) sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 10) sdc_executor.stop_pipeline(pipeline) rows_from_wiretap = [record.field['text'] for record in wiretap.output_records] assert len(data) == len(rows_from_wiretap) assert rows_from_wiretap == data finally: logger.info('Deleting bucket %s ...', created_bucket.name) gcp.retry_429(created_bucket.delete)(force=True)
5,349,297
def ldap_suffix(): """Returns ldap search base. return 'dc=xx,dc=com' """ assert False
5,349,298
def _parse_accounts_ce(database, uid, result_path): """Parse accounts_ce.db. Args: database (SQLite3): target SQLite3 database. uid (str): user id. result_path (str): result path. """ cursor = database.cursor() try: cursor.execute(query) except sqlite3.Error as exception: logger.error('Accounts not found! {0!s}'.format(exception)) results = cursor.fetchall() num_of_results = len(results) data = {} header = ('name', 'type', 'password') data['title'] = 'accounts_ce'+f'_{uid}' data['number_of_data_headers'] = len(header) data['number_of_data'] = num_of_results data['data_header'] = header data_list = [] if num_of_results >0: for row in results: data_list.append((row[0], row[1], row[2])) data['data'] = data_list else: logger.warning('NO Accounts found!') return data
5,349,299