content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def pomodoro_timer():
    """ 25 min timer popup window acting as a callback function to the work timer button """
    global popup_1
    popup_1 = tk.Toplevel(root)
    popup_1.title("Work Timer!")
    popup_1.geometry("370x120")
    round = 0
    try:
        # Creating a continuous loop of text of time on the screen for 25 mins
        t = 25*60
        while t > -1:
            minute_count = t // 60
            second_count = t % 60
            timer = '{:02d}:{:02d}'.format(minute_count, second_count)
            time_display = tk.Label(popup_1, text=timer, bg='DodgerBlue4', fg='white', font=('STIX', 90, 'bold'))
            time_display.place(x=0, y=0)
            popup_1.update()
            time.sleep(1)
            t -= 1
    except:
        pass
    # Setting up an alarm sound and popup window to let user know when the time is up
    if t == -1:
        tk.messagebox.showinfo("Time's up!", "Pomodoro completed successfully!\nYou deserve a break!")
        popup_1.destroy()
        global pomo_count
        pomo_count += 1
        pygame.mixer.music.load("./Pomodoro_GUI/beep.wav")
        pygame.mixer.music.play(loops=0)
5,347,500
def save_video_list_to_hdf5(video_list_path, save_path):
    """Store the unique videos in the given video list in an HDF5 file.

    :param video_list_path: Path to a video list
    :param save_path: The path to the HDF5 file to create
    """
    with open(video_list_path, 'r') as f:
        lines = [line.strip() for line in f.readlines()]
    # Remove frame indexes if found
    lines = [line.split()[0] for line in lines]
    # Get unique lines
    unique_video_paths = np.unique(lines)
    # Check that the basenames are also unique
    unique_basenames = np.unique([os.path.basename(path) for path in unique_video_paths])
    if len(unique_video_paths) != len(unique_basenames):
        raise RuntimeError('At least one duplicate video name was found')
    save_videos_to_hdf5(unique_video_paths, save_path)
5,347,501
def getCredibleInterval(df):
    """
    compute 95% credible interval (bayesian approach)
    Args:
        dataframe: first column is name(string), second column is score(numeric)
    Return:
        dataframe(three columns:name,lower_bound,upper_bound)
    """
    pass
5,347,502
def add_mutes(guild_id: int, role_id: int, user_id: int, author_id: int, datetime_to_parse: str):
    """
    Add a temporary mute to a user.
    NOTE: datetime_to_parse should be a string like: "1 hour 30 minutes"
    """
    with open("data/unmutes.json", "r+", newline='\n', encoding='utf-8') as temp_file:
        mutes = json.load(temp_file)
    new_mute_data = (user_id, role_id, guild_id)
    str_dt_obj = parse_times(datetime_to_parse)
    # if the script made it this far, this is real we have to store mute data
    if str_dt_obj not in mutes:
        mutes[str_dt_obj] = []
    mutes[str_dt_obj].append(new_mute_data)
    mute_index = len(mutes[str_dt_obj]) - 1
    if str(guild_id) not in mutes:
        mutes[str(guild_id)] = {}
    if str(user_id) in mutes[str(guild_id)]:
        mutes[str(guild_id)].pop(str(user_id))
    if not str(user_id) in mutes[str(guild_id)]:
        mutes[str(guild_id)][str(user_id)] = []
    mutes[str(guild_id)][str(user_id)] = [str_dt_obj, author_id, mute_index]
    json.dump(mutes, open("data/unmutes.json", "w+", newline='\n', encoding='utf-8'))
    return str_dt_obj
    # Don't worry I can't read this mess either.
5,347,503
def operation_dict(ts_epoch, request_dict):
    """An operation as a dictionary."""
    return {
        "model": request_dict,
        "model_type": "Request",
        "args": [request_dict["id"]],
        "kwargs": {"extra": "kwargs"},
        "target_garden_name": "child",
        "source_garden_name": "parent",
        "operation_type": "REQUEST_CREATE",
    }
5,347,504
def setdlopenflags(n):  # real signature unknown; restored from __doc__
    """
    setdlopenflags(n) -> None

    Set the flags used by the interpreter for dlopen calls, such as when the
    interpreter loads extension modules. Among other things, this will enable
    a lazy resolving of symbols when importing a module, if called as
    sys.setdlopenflags(0). To share symbols across extension modules, call as
    sys.setdlopenflags(os.RTLD_GLOBAL). Symbolic names for the flag modules
    can be found in the os module (RTLD_xxx constants, e.g. os.RTLD_LAZY).
    """
    pass
5,347,505
def unix_timestamp(s=None, p="yyyy-MM-dd HH:mm:ss"):
    """
    :rtype: Column

    >>> import os, time
    >>> os.environ['TZ'] = 'Europe/Paris'
    >>> if hasattr(time, 'tzset'): time.tzset()
    >>> from pysparkling import Context, Row
    >>> from pysparkling.sql.session import SparkSession
    >>> spark = SparkSession(Context())
    >>> spark.range(1).select(unix_timestamp(lit("2033-05-18 05:33:21"))).show()
    +--------------------------------------------------------+
    |unix_timestamp(2033-05-18 05:33:21, yyyy-MM-dd HH:mm:ss)|
    +--------------------------------------------------------+
    |                                              2000000001|
    +--------------------------------------------------------+
    >>> spark.range(1).select(unix_timestamp(lit("2019-01-01"), "yyyy-MM-dd")).show()
    +--------------------------------------+
    |unix_timestamp(2019-01-01, yyyy-MM-dd)|
    +--------------------------------------+
    |                            1546297200|
    +--------------------------------------+
    """
    if s is None:
        s = col(CurrentTimestamp())
    return col(UnixTimestamp(ensure_column(s), lit(p)))
5,347,506
def memory_index(indices, t):
    """Location of an item in the underlying memory."""
    memlen, itemsize, ndim, shape, strides, offset = t
    p = offset
    for i in range(ndim):
        p += strides[i] * indices[i]
    return p
5,347,507
def createExpData(f, xVals):
    """Assumes f is an exponential function of one argument
       xVals is an array of suitable arguments for f
       Returns array containing results of applying f to the elements of xVals"""
    yVals = []
    for i in range(len(xVals)):
        yVals.append(f(xVals[i]))
    return pylab.array(xVals), pylab.array(yVals)
5,347,508
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    data = aocd.get_data(year=2016, day=25)
    print(f"Part 1: {find_starting_a_for_clock_output(data)}")
5,347,509
def run_macs2_ATAC(Configuration):
    """ run macs2 application """
    logging.info("running macs2")
    cleaned_align_output_dir = os.path.join(Configuration.cleaned_alignments_dir, Configuration.file_to_process)
    filtered_align_file = cleaned_align_output_dir + f"/{Configuration.file_to_process}_align_filtered_macs2.bam"
    macs2_output_dir = os.path.join(Configuration.macs2_dir, Configuration.file_to_process)
    os.makedirs(macs2_output_dir, exist_ok=True)
    clean_dir(macs2_output_dir)
    subprocess.run(["macs2", "callpeak", "-f", "BAMPE", "-g", "hs", "--keep-dup", "all",
                    "-n", Configuration.file_to_process, "-t", filtered_align_file,
                    "--outdir", macs2_output_dir])
5,347,510
def discrete_one_samp_ks(distribution1: np.array, distribution2: np.array, num_samples: int) -> Tuple[float, bool]:
    """Uses the one-sample Kolmogorov-Smirnov test to determine if the empirical results in
    distribution1 come from the distribution represented in distribution2

    :param distribution1: empirical distribution (numpy array)
    :param distribution2: reference distribution (numpy array)
    :param num_samples: number of samples used to generate distribution1
    :return: a tuple (D, D<D_{alpha})
    """
    cutoff = 1.36 / math.sqrt(num_samples)
    ecdf1 = np.array([sum(distribution1[:i + 1]) for i in range(len(distribution1))])
    ecdf2 = np.array([sum(distribution2[:i + 1]) for i in range(len(distribution2))])
    max_diff = np.absolute(ecdf1 - ecdf2).max()
    return max_diff, max_diff < cutoff
5,347,511
def main():
    """ Main """
    deamonize("myserver-server")
    HOST, PORT = "localhost", 9999
    LOG.info("** Starting myserver server on https://%s:%d **" % (HOST, PORT))
    server = MyServer((HOST, PORT))
    # For SSL - uncomment the following line
    #server.socket = ssl.wrap_socket(server.socket, keyfile='<path_to_keyfile>', certfile='<path_to_cert>', server_side=True)
    server.serve_forever()
5,347,512
def test_get_live_markets():
    """Test get_live_markets."""
    assert all(isinstance(market_info, MarketInfo)
               for market_info in get_live_markets())
5,347,513
def _get_product_refs(pkgs):
    """Returns a list of product references as declared in the specified packages list.

    Args:
        pkgs: A `list` of package declarations (`struct`) as created by
            `packages.create()`, `packages.pkg_json()` or `spm_pkg()`.

    Returns:
        A `list` of product reference (`string`) values.
    """
    return [refs.create(ref_types.product, pkg.name, prd) for pkg in pkgs for prd in pkg.products]
5,347,514
def run_equilb_ensemble_gomc_command(job):
    """Run the gomc equilb_ensemble simulation."""
    for run_equilb_ensemble_i in range(
        job.doc.equilb_design_ensemble_number, equilb_design_ensemble_max_number
    ):
        print("#**********************")
        print("# Started the run_equilb_ensemble_gomc_command function.")
        print("#**********************")
        if (
            job.doc.equilb_design_ensemble_number
            >= equilb_design_ensemble_max_number
        ):
            job.doc.equilb_design_ensemble_max_number_under_limit = False
        elif (
            job.doc.stable_equilb_design_ensemble is False
            and job.doc.equilb_design_ensemble_max_number_under_limit is True
        ):
            control_file_name_str = job.doc.equilb_design_ensemble_dict[
                str(job.doc.equilb_design_ensemble_number)
            ]["output_name_control_file_name"]
            print(f"Running simulation job id {job}")
            run_command = "{}/{} +p{} {}.conf > out_{}.dat" "".format(
                str(job.doc.gomc_binary_path),
                str(job.doc.equilb_design_ensemble_gomc_binary_file),
                str(ff_info_dict.get(job.sp.forcefield_name).get("ncpu")),
                str(control_file_name_str),
                str(control_file_name_str),
            )
            exec_run_command = subprocess.Popen(
                run_command, shell=True, stderr=subprocess.STDOUT
            )
            os.waitpid(exec_run_command.pid, 0)  # os.WSTOPPED) # 0)
            test_pymbar_stabilized_equilb_design_ensemble(job)
            if job.doc.stable_equilb_design_ensemble is False:
                # need to add equilb_design_ensemble_number by 1 so it is fixed to run the correct job
                # so it is rerun if restarted
                job.doc.equilb_design_ensemble_number += 1
5,347,515
def merge_intervals(interval_best_predictors):
    """ Merge intervals with the same best predictor """
    predictor2intervals = defaultdict(set)
    for interval, best_predictor in interval_best_predictors.items():
        predictor2intervals[best_predictor].update(interval)
    merged_intervals = {best_predictor: max(interval_points) - min(interval_points)
                        for best_predictor, interval_points in predictor2intervals.items()}
    return merged_intervals
5,347,516
def build_pixel_sampler(cfg, **default_args):
    """Build pixel sampler for segmentation map."""
    return build_module_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
5,347,517
def solution_to_schedule(solution, events, slots):
    """Convert a schedule from solution to schedule form

    Parameters
    ----------
    solution : list or tuple
        of tuples of event index and slot index for each scheduled item
    events : list or tuple
        of :py:class:`resources.Event` instances
    slots : list or tuple
        of :py:class:`resources.Slot` instances

    Returns
    -------
    list
        A list of instances of :py:class:`resources.ScheduledItem`
    """
    return [
        ScheduledItem(
            event=events[item[0]],
            slot=slots[item[1]]
        )
        for item in solution
    ]
5,347,518
def flip_tiles(tiles):
    """
    Initially all tiles are white. Every time, a tile is visited based on the directions,
    it is flipped (to black, or to white again). The directions are represented in (x,y)
    coordinates starting from reference tile at (0,0). Based on the given directions to each
    tile starting from the reference tile, the coordinates of the tile is found and added to
    the set of black tiles. If the tile is already a black tile, it is flipped and thus
    removed from the set. This function returns the set of black tiles.
    """
    black_tiles = set()
    for directions_to_tile in tiles:
        x, y = (0, 0)
        for direction in directions_to_tile:
            x, y = get_coordinates(x, y, direction)
        found_tile = (x, y)
        if found_tile not in black_tiles:
            black_tiles.add(found_tile)
        else:
            black_tiles.remove(found_tile)
    return black_tiles
5,347,519
def version() -> int:
    """Return the version number of the libpq currently loaded.

    The number is in the same format of `~psycopg.ConnectionInfo.server_version`.

    Certain features might not be available if the libpq library used is too old.
    """
    return impl.PQlibVersion()
5,347,520
def feature_predictors_from_ensemble(features, verbose=False):
    """generates a dictionary of the form {"offset":offset_predictor, "sigma":sigma_predictor, ...}
    where the predictors are generated from the center and spread statistics of the
    feature ensemble.

    features: list
        the feature objects
    """
    lparams = np.asarray([f.profile.get_parameters() for f in features])
    cent_wvs = np.asarray([f.wv for f in features])
    rel_norms = np.asarray([f.relative_continuum for f in features])
    delta_wvs = np.asarray([np.mean(scipy.gradient(f.data_sample.wv)) for f in features])
    dwv_over_wv = delta_wvs/cent_wvs
    med_inv_r = np.median(dwv_over_wv)
    sig_over_wv = lparams[:, 1]/cent_wvs
    sig_med = np.median(sig_over_wv)
    sig_mad = np.median(np.abs(sig_over_wv-sig_med))
    if verbose:
        print("sigma median", sig_med, "sigma mad", sig_mad)
    vel_offs = lparams[:, 0]/cent_wvs
    vel_med = np.median(vel_offs)
    vel_mad = np.median(np.abs(vel_offs - vel_med))
    if verbose:
        print("velocity median", vel_med, "velocity mad", vel_mad)
    gam_med = np.median(np.abs(lparams[:, 2]))
    gam_mad = np.median(np.abs(lparams[:, 2]-gam_med))
    if verbose:
        print("gamma median", gam_med, "gamma mad", gam_mad)
    rel_med = np.median(rel_norms)
    rel_mad = np.median(np.abs(rel_norms-rel_med))
    if verbose:
        print("rel_norm median", gam_med, "rel_norm mad", gam_mad)
    predictors = {}
    offset_predictor = WavelengthScaledGaussianPredictor(vel_med, 1.4*vel_mad)
    sigma_predictor = WavelengthScaledGaussianPredictor(sig_med, 1.4*sig_mad + 0.5*med_inv_r)
    gamma_predictor = GaussianPredictor(gam_med, 1.4*gam_mad+0.1*np.median(delta_wvs))
    rel_norm_predictor = GaussianPredictor(1.0, 0.01)
    predictors["offset"] = offset_predictor
    predictors["sigma"] = sigma_predictor
    predictors["gamma"] = gamma_predictor
    predictors["rel_norm"] = rel_norm_predictor
    return predictors
5,347,521
def remoteLoggingConfig(host, args, session):
    """
    Called by the logging function. Configures remote logging (rsyslog).

    @param host: string, the hostname or IP address of the bmc
    @param args: contains additional arguments used by the logging sub command
    @param session: the active session to use
    @param args.json: boolean, if this flag is set to true, the output will be
           provided in json format for programmatic consumption
    """
    url = "https://"+host+"/xyz/openbmc_project/logging/config/remote"
    try:
        res = session.put(url + '/attr/Port', headers=jsonHeader,
                          json={"data": args.port}, verify=False, timeout=baseTimeout)
        res = session.put(url + '/attr/Address', headers=jsonHeader,
                          json={"data": args.address}, verify=False, timeout=baseTimeout)
    except(requests.exceptions.Timeout):
        return(connectionErrHandler(args.json, "Timeout", None))
    return res.text
5,347,522
def _merge_3d_t1w(filename: Union[str, PathLike]) -> pathlib.Path:
    """
    Merges T1w images that have been split into two volumes

    Parameters
    ----------
    filename : str or pathlib.Path
        Path to T1w image that needs to be merged

    Returns
    -------
    filename : pathlib.Path
        Path to merged T1w image
    """
    import numpy as np

    filename = pathlib.Path(filename).resolve()
    img = nib.load(str(filename))
    if not (len(img.shape) == 4 and img.shape[-1] > 1):
        return
    # split data along fourth dimension and then concatenate along third
    imdata = img.get_data()
    cat = [d.squeeze() for d in np.split(imdata, imdata.shape[-1], axis=-1)]
    imdata = np.concatenate(cat, axis=-1)
    new_img = img.__class__(imdata, img.affine, img.header)
    nib.save(new_img, filename)
    return filename
5,347,523
def write_model(outfile, best):
    """ write top individual out to model file """
    functions = [str(b) for b in best]
    penalized_igs = [b.fitness.values[0] for b in best]
    lengths = [b.fitness.values[1] for b in best]
    model_df = pd.DataFrame(np.array([penalized_igs, lengths, functions]).T)
    model_df.columns = ["penalized_ig", "length", "model"]
    model_df.to_csv(outfile, sep="\t", header=True, index=False)
5,347,524
def get_partial_results(case_name, list_of_variables):
    """ Get a dictionary with the variable names and the time series for `list_of_variables` """
    reader = get_results(case_name)
    d = dict()
    read_time = True
    for v in list_of_variables:
        if read_time:
            d['time'] = reader.values(v)[0]
            read_time = False
        d[v] = reader.values(v)[1]
    return d
5,347,525
def take_with_time(self, duration, scheduler=None):
    """Takes elements for the specified duration from the start of the observable
    source sequence, using the specified scheduler to run timers.

    Example:
    res = source.take_with_time(5000, [optional scheduler])

    Description:
    This operator accumulates a queue with a length enough to store elements
    received during the initial duration window. As more elements are received,
    elements older than the specified duration are taken from the queue and
    produced on the result sequence. This causes elements to be delayed with
    duration.

    Keyword arguments:
    duration -- {Number} Duration for taking elements from the start of the
        sequence.
    scheduler -- {Scheduler} Scheduler to run the timer on. If not specified,
        defaults to rx.Scheduler.timeout.

    Returns {Observable} An observable sequence with the elements taken during
    the specified duration from the start of the source sequence.
    """
    source = self
    scheduler = scheduler or timeout_scheduler

    def subscribe(observer):
        def action(scheduler, state):
            observer.on_completed()

        disposable = scheduler.schedule_relative(duration, action)
        return CompositeDisposable(disposable, source.subscribe(observer))
    return AnonymousObservable(subscribe)
5,347,526
def add_label(hdf5_filename, key, peak, label):
    """
    Function that adds a label to a peak dataset in the hdf5 file. It has to be
    iterated over every single peak.

    Parameters:
        hdf5_filename (string): filename of experimental file
        key (string): key within `hdf5_filename` of experimental file
        peak (string): string name of 'Peak_0#" associated with the peak list
            containing tuples of the x_data (wavenumber) and y_data (counts)
            values of the peaks.
        label (string): string name of an individual label from internal function
            unknown_peak_assignment that is used in lineidplot.

    Returns:
        df (DataFrame): DataFrame which contains the peak fitted data and peak
            descriptors of each classified peak based on the fed-in known spectra.
    """
    # Handling errors in inputs.
    if not isinstance(hdf5_filename, str):
        raise TypeError("""Passed value of `hdf5_filename` is not a string!
        Instead, it is: """ + str(type(hdf5_filename)))
    if not hdf5_filename.split('/')[-1].split('.')[-1] == 'hdf5':
        raise TypeError("""`hdf5_filename` is not type = .hdf5!
        Instead, it is: """ + hdf5_filename.split('/')[-1].split('.')[-1])
    if not isinstance(key, str):
        raise TypeError("""Passed value of `key` is not a str!
        Instead, it is: """ + str(type(key)))
    if not isinstance(peak, str):
        raise TypeError("""Passed value of `peak` is not a string!
        Instead, it is: """ + str(type(peak)))
    if not isinstance(label, str):
        raise TypeError("""Passed value of `label` is not a string!
        Instead, it is: """ + str(type(label)))
    # open hdf5 file as read/write
    hdf5 = h5py.File(hdf5_filename, 'r+')
    # extract existing data from peak dataset
    peak_data = list(hdf5['{}/{}'.format(key, peak)][0])[:7]
    # print(peak_data)
    # make a new tuple that contains the orginal data as well as the label
    label_tuple = (label,)
    data = tuple(peak_data) + label_tuple
    # delete the old dataset so the new one can be saved
    del hdf5['{}/{}'.format(key, peak)]
    # define a custom datatype that allows for a string as the the last tuple element
    my_datatype = np.dtype([('fraction', np.float),
                            ('center', np.float),
                            ('sigma', np.float),
                            ('amplitude', np.float),
                            ('fwhm', np.float),
                            ('height', np.float),
                            ('area under the curve', np.float),
                            ('label', h5py.special_dtype(vlen=str))])
    # recreate the old dataset in the hdf5 file
    dataset = hdf5.create_dataset('{}/{}'.format(key, peak), (1,), dtype=my_datatype)
    # apply custom dtype to data tuple
    # print(dataset)
    # print(data)
    # print(my_datatype)
    data_array = np.array(data, dtype=my_datatype)
    # write new values to the blank dataset
    dataset[...] = data_array
    # print(dataset)
    hdf5.close()
    df = pd.DataFrame(data=data)
    return df
5,347,527
def test_aws_binary_which(host):
    """
    Tests the output to confirm aws's binary location.
    """
    assert host.check_output('which aws') == PACKAGE_BINARY
5,347,528
def _infer_color_variable_kind(color_variable, data):
    """Determine whether color_variable is array, pandas dataframe, callable,
    or scikit-learn (fit-)transformer."""
    if hasattr(color_variable, "dtype") or hasattr(color_variable, "dtypes"):
        if len(color_variable) != len(data):
            raise ValueError(
                "color_variable and data must have the same length.")
        color_variable_kind = "scalars"
    elif hasattr(color_variable, "transform"):
        color_variable_kind = "transformer"
    elif hasattr(color_variable, "fit_transform"):
        color_variable_kind = "fit_transformer"
    elif callable(color_variable):
        color_variable_kind = "callable"
    elif color_variable is None:
        color_variable_kind = "none"
    else:  # Assume color_variable is a selection of columns
        color_variable_kind = "else"
    return color_variable_kind
5,347,529
def ParseChromeosImage(chromeos_image):
    """Parse the chromeos_image string for the image and version.

    The chromeos_image string will probably be in one of two formats:
    1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
        chromiumos_test_image.bin
    2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
        chromiumos_test_image.bin

    We parse these strings to find the 'chromeos_version' to store in the
    json archive (without the .datatime bit in the first case); and also
    the 'chromeos_image', which would be all of the first case, but only the
    part after '/chroot/tmp' in the second case.

    Args:
        chromeos_image: string containing the path to the chromeos_image that
        crosperf used for the test.

    Returns:
        version, image: The results of parsing the input string, as explained
        above.
    """
    # Find the Chromeos Version, e.g. R45-2345.0.0.....
    # chromeos_image should have been something like:
    # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin"
    if chromeos_image.endswith('/chromiumos_test_image.bin'):
        full_version = chromeos_image.split('/')[-2]
        # Strip the date and time off of local builds (which have the format
        # "R43-2345.0.0.date-and-time").
        version, _ = os.path.splitext(full_version)
    else:
        version = ''

    # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
    # it's an official image that got downloaded, so chop off the download path
    # to make the official image name more clear.
    official_image_path = '/chroot/tmp'
    if official_image_path in chromeos_image:
        image = chromeos_image.split(official_image_path, 1)[1]
    else:
        image = chromeos_image
    return version, image
5,347,530
def save_model(_clf, save_folder, filename, logger):
    """
    Dumps a given classifier to the specific folder with the given name
    """
    _path = os.path.join(save_folder, filename)
    logger.debug("save model to " + _path)
    with open(_path, 'wb') as handle:
        pickle.dump(_clf, handle, protocol=pickle.HIGHEST_PROTOCOL)
5,347,531
def update_alias(AliasName=None, TargetKeyId=None):
    """
    Associates an existing AWS KMS alias with a different customer master key (CMK). Each alias is associated with only one CMK at a time, although a CMK can have multiple aliases. The alias and the CMK must be in the same AWS account and region. You cannot perform this operation on an alias in a different AWS account.
    The current and new CMK must be the same type (both symmetric or both asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY ). This restriction prevents errors in code that uses aliases. If you must assign an alias to a different type of CMK, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.
    You cannot use UpdateAlias to change an alias name. To change an alias name, use DeleteAlias to delete the old alias and CreateAlias to create a new alias.
    Because an alias is not a property of a CMK, you can create, update, and delete the aliases of a CMK without affecting the CMK. Also, aliases do not appear in the response from the DescribeKey operation. To get the aliases of all CMKs in the account, use the ListAliases operation.
    The CMK that you use for this operation must be in a compatible key state. For details, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide .
    See also: AWS API Documentation

    Exceptions

    Examples
    The following example updates the specified alias to refer to the specified customer master key (CMK).
    Expected Output:

    :example: response = client.update_alias(
        AliasName='string',
        TargetKeyId='string'
    )

    :type AliasName: string
    :param AliasName: [REQUIRED]
        Identifies the alias that is changing its CMK. This value must begin with alias/ followed by the alias name, such as alias/ExampleAlias . You cannot use UpdateAlias to change the alias name.

    :type TargetKeyId: string
    :param TargetKeyId: [REQUIRED]
        Identifies the CMK to associate with the alias. When the update operation completes, the alias will point to this CMK.
        The CMK must be in the same AWS account and Region as the alias. Also, the new target CMK must be the same type as the current target CMK (both symmetric or both asymmetric) and they must have the same key usage.
        Specify the key ID or the Amazon Resource Name (ARN) of the CMK.
        For example:

        Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
        Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

        To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey .
        To verify that the alias is mapped to the correct CMK, use ListAliases .

    :return: response = client.update_alias(
        # The alias to update.
        AliasName='alias/ExampleAlias',
        # The identifier of the CMK that the alias will refer to after this operation succeeds. You can use the key ID or the Amazon Resource Name (ARN) of the CMK.
        TargetKeyId='1234abcd-12ab-34cd-56ef-1234567890ab',
    )
    print(response)

    :returns:
        KMS.Client.exceptions.DependencyTimeoutException
        KMS.Client.exceptions.NotFoundException
        KMS.Client.exceptions.KMSInternalException
        KMS.Client.exceptions.KMSInvalidStateException
    """
    pass
5,347,532
def Dense(name, out_dim, W_init=stax.glorot(), b_init=stax.randn()):
    """Layer constructor function for a dense (fully-connected) layer."""
    def init_fun(rng, example_input):
        input_shape = example_input.shape
        k1, k2 = random.split(rng)
        W, b = W_init(k1, (out_dim, input_shape[-1])), b_init(k2, (out_dim,))
        return W, b

    def apply_fun(params, inputs):
        W, b = params
        return np.dot(W, inputs) + b
    return core.Layer(name, init_fun, apply_fun).bind
5,347,533
def datetime_to_epoch(date_time: datetime) -> int:
    """Convert a datetime object to an epoch integer (seconds)."""
    return int(date_time.timestamp())
5,347,534
def parse_arguments():
    """
    Merge the scar.conf parameters, the cmd parameters and the yaml
    file parameters in a single dictionary.

    The precedence of parameters is CMD >> YAML >> SCAR.CONF
    That is, the CMD parameter will override any other configuration,
    and the YAML parameters will override the SCAR.CONF settings
    """
    config_args = ConfigFileParser().get_properties()
    func_call, cmd_args = CommandParser().parse_arguments()
    if 'conf_file' in cmd_args['scar'] and cmd_args['scar']['conf_file']:
        yaml_args = FileUtils.load_yaml(cmd_args['scar']['conf_file'])
        # YAML >> SCAR.CONF
        merged_args = fdl.merge_conf(config_args, yaml_args)
        merged_args = fdl.merge_cmd_yaml(cmd_args, merged_args)
    else:
        # CMD >> SCAR.CONF
        merged_args = fdl.merge_conf(config_args, cmd_args)
    #self.cloud_provider.parse_arguments(merged_args)
    FileUtils.create_tmp_config_file(merged_args)
    return func_call
5,347,535
def write_pcd_results(network_manager, folder, year, pop_scenario,
                      throughput_scenario, intervention_strategy,
                      cost_by_pcd, lad_areas):
    """
    Write postcode sector results to .csv file.
    """
    suffix = _get_suffix(pop_scenario, throughput_scenario, intervention_strategy)

    if not os.path.exists(folder):
        os.mkdir(folder)
    metrics_filename = os.path.join(folder, 'pcd_metrics_{}.csv'.format(suffix))

    if year == BASE_YEAR:
        metrics_file = open(metrics_filename, 'w', newline='')
        metrics_writer = csv.writer(metrics_file)
        metrics_writer.writerow(
            ('year', 'area_id', 'lad_id', 'cost', 'demand', 'demand_density',
             'user_demand', 'site_density_macrocells', 'site_density_small_cells',
             'capacity', 'capacity_deficit', 'population', 'area', 'pop_density',
             'clutter_env'))
    else:
        metrics_file = open(metrics_filename, 'a', newline='')
        metrics_writer = csv.writer(metrics_file)

    for pcd in network_manager.postcode_sectors.values():
        if pcd.lad_id in lad_areas:
            demand = pcd.demand
            demand_density = pcd.demand_density
            site_density_macrocells = pcd.site_density_macrocells
            site_density_small_cells = pcd.site_density_small_cells
            capacity = pcd.capacity
            capacity_deficit = capacity - demand
            population = pcd.population
            area = pcd.area
            pop_d = pcd.population_density
            cost = cost_by_pcd[pcd.id]
            user_demand = pcd.user_demand
            clutter_env = pcd.clutter_environment

            metrics_writer.writerow(
                (year, pcd.id, pcd.lad_id, cost, demand, demand_density,
                 user_demand, site_density_macrocells, site_density_small_cells,
                 capacity, capacity_deficit, population, area, pop_d, clutter_env)
            )

    metrics_file.close()
5,347,536
def check_signature(stream: BinaryIO) -> str:
    """
    Check signature of the model file and return characters used by the model.

    The characters returned are sorted in lexicographical order.
    """
    uzmodel_tag = stream.read(8)
    if uzmodel_tag != b'UZMODEL ':
        raise IOError('invalid uzmodel_tag')
    uzmodel_version = read_int(stream)
    if uzmodel_version == 1:
        ssv = 0
    elif uzmodel_version == 2:
        ssv = read_int(stream)
    else:
        raise IOError('invalid uzmodel_version')
    if ssv == 0:
        chars = ''.join(map(chr, chain(STD, AFT, EXA, EXB, SPC)))
    elif ssv == 1:
        chars = ''.join(map(chr, chain(STD, AFT, EXA, EXB)))
    else:
        raise ValueError('invalid ssv')
    bmarkov_tag = stream.read(8)
    if bmarkov_tag != b'BMARKOV ':
        raise IOError('invalid bmarkov_tag')
    bmarkov_version = read_int(stream)
    if bmarkov_version != 0:
        raise IOError('invalid bmarkov_version')
    return chars
5,347,537
def Validate(expected_schema, datum):
    """Determines if a python datum is an instance of a schema.

    Args:
        expected_schema: Schema to validate against.
        datum: Datum to validate.
    Returns:
        True if the datum is an instance of the schema.
    """
    schema_type = expected_schema.type
    if schema_type == 'null':
        return datum is None
    elif schema_type == 'boolean':
        return isinstance(datum, bool)
    elif schema_type == 'string':
        return isinstance(datum, str)
    elif schema_type == 'bytes':
        return isinstance(datum, bytes)
    elif schema_type == 'int':
        return (isinstance(datum, int)
                and (INT_MIN_VALUE <= datum <= INT_MAX_VALUE))
    elif schema_type == 'long':
        return (isinstance(datum, int)
                and (LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE))
    elif schema_type in ['float', 'double']:
        return (isinstance(datum, int) or isinstance(datum, float))
    elif schema_type == 'fixed':
        return isinstance(datum, bytes) and (len(datum) == expected_schema.size)
    elif schema_type == 'enum':
        return datum in expected_schema.symbols
    elif schema_type == 'array':
        return (isinstance(datum, list)
                and all(Validate(expected_schema.items, item) for item in datum))
    elif schema_type == 'map':
        return (isinstance(datum, dict)
                and all(isinstance(key, str) for key in datum.keys())
                and all(Validate(expected_schema.values, value)
                        for value in datum.values()))
    elif schema_type in ['union', 'error_union']:
        return any(Validate(union_branch, datum)
                   for union_branch in expected_schema.schemas)
    elif schema_type in ['record', 'error', 'request']:
        return (isinstance(datum, dict)
                and all(Validate(field.type, datum.get(field.name))
                        for field in expected_schema.fields))
    else:
        raise AvroTypeException('Unknown Avro schema type: %r' % schema_type)
5,347,538
def get_main_play_action(action: PlayerAction) -> PlayerAction:
    """
    Gets the main play, e.g., FLYOUT or SINGLE
    :param action:
    :return:
    """
    print("Searching for main play")
    # find out if the string contains any of the allowed actions
    for i in PlayerActionEnum:
        if i.value in action.action_text:
            print(f"\tFound {i.value}!")
            action.action_type = i
            action.action_text = action.action_text.replace(i.value, '')
            break
    return action
5,347,539
def inet_aton(s):
    """Convert a dotted-quad to an int."""
    try:
        addr = list(map(int, s.split('.')))
        addr = reduce(lambda a, b: a+b, [addr[i] << (3-i)*8 for i in range(4)])
    except (ValueError, IndexError):
        raise ValueError('illegal IP: {0}'.format(s))
    return addr
5,347,540
def welcome_and_instruct():
    """
    Prints welcome and instructions upon program start
    """
    for x in range(5):
        print('\r\n')
    figlet = pyfiglet.figlet_format('Tweet Annotation Tool', font='slant')
    print(figlet)
    print('by Kris Bolton')
    print('v0.2.0-alpha')
    print('\r\n')
    print('INFORMATION')
    print('A new CSV file will be created with three columns: tweet ID, tweet and sentiment annotation.')
    print('Sentiment annotations are: negative (1), neutral (2) or positive (3).')
    print('Input the corresponding number to annotate the sentiment and press enter.')
    print('Entries are appended to the new CSV file.')
    print('\r\n')
5,347,541
def local_principals(context, principals):
    """
    The idea behind this is to process __ac_local_roles__ (and a boolean
    __ac_local_roles_block__ to disable) and add local principals. This only
    works if you're in correct context, though, which does not seem to be the case.
    """
    local_principals = set()
    block = False
    for location in lineage(context):
        if block:
            break
        block = getattr(location, '__ac_local_roles_block__', False)
        local_roles = getattr(location, '__ac_local_roles__', None)
        if local_roles and callable(local_roles):
            local_roles = local_roles()
        if not local_roles:
            continue
        for principal in principals:
            try:
                roles = local_roles[principal]
            except KeyError:
                pass
            else:
                if not is_nonstr_iter(roles):
                    roles = [roles]
                local_principals.update(roles)
    if not local_principals:
        return principals
    local_principals.update(principals)
    if DEBUG_PERMISSIONS:
        PRINT("local_principals")
        PRINT(" context.collection=", context.collection)
        PRINT(" context.__acl__()=", context.__acl__())
        PRINT(" context.collection.__ac_local_roles_()=", context.__ac_local_roles__())
        PRINT("local_principals returning", local_principals)
    return local_principals
5,347,542
def dfs_paths(graph, start, end=None):
    """Find all paths in digraph, return a generator of tuples

    Input: adjacency list in a dict:
        {
            node1: set([node1, node2, node3]),
            node2: set([node2, node3]),
            node3: set(),
        }
    """
    if not graph or start not in graph:
        return
        yield  # Empty generator
    stack = [(start, [start])]
    while stack:
        node, path = stack.pop()
        if not graph.get(node, None):
            yield tuple(path)
            continue
        if end and node == end:
            yield tuple(path)
            continue
        for next in graph[node] - set(path):
            if not next:
                yield tuple(path) + (next,)
            else:
                stack.append((next, path + [next]))
5,347,543
def exportSchemaAsXSD(schema, versionNumber, filePath):
    """
    Exports the given schema as an XSD document.

    Parameters
    ----------
    schema : Schema
        The schema to export.
    versionNumber : string
        The version number of the schema.
    filePath : string
        The path to which to save the XSD file.

    Returns
    -------
    None
    """
    xsdExporter = XSDExporter()
    xsdExporter.exportSchema(schema, versionNumber, filePath)
5,347,544
def helper():
    """I'm useful helper"""
    data = {
        "31 Dec 2019": "Wuhan Municipal Health Commission, China, reported a cluster of cases of pneumonia in Wuhan, Hubei Province. A novel coronavirus was eventually identified.",
        "1 January 2020": "WHO had set up the IMST (Incident Management Support Team) across the three levels of the organization: headquarters, regional headquarters and country level, putting the organization on an emergency footing for dealing with the outbreak.",
        "4 January 2020": "WHO reported on social media that there was a cluster of pneumonia cases – with no deaths – in Wuhan, Hubei province."
    }
    return data
5,347,545
def canonicalize_specification(expr, syn_ctx, theory):
    """Performs a bunch of operations:
    1. Checks that the expr is "well-bound" to the syn_ctx object.
    2. Checks that the specification has the single-invocation property.
    3. Gathers the set of synth functions (should be only one).
    4. Gathers the variables used in the specification.
    5. Converts the specification to CNF (as part of the single-invocation test)
    6. Given that the spec is single invocation, rewrites the CNF spec (preserving and sat)
       by introducing new variables that correspond to a uniform way of invoking the
       (single) synth function

    Returns a tuple containing:
    1. A list of 'variable_info' objects corresponding to the variables used in the spec
    2. A list of synth functions (should be a singleton list)
    3. A list of clauses corresponding to the CNF specification
    4. A list of NEGATED clauses
    5. A list containing the set of formal parameters that all appearances of the synth
       functions are invoked with.
    """
    check_expr_binding_to_context(expr, syn_ctx)
    clauses, cnf_expr = to_cnf(expr, theory, syn_ctx)

    synth_function_set = gather_synth_functions(expr)
    synth_function_list = list(synth_function_set)
    num_funs = len(synth_function_list)

    orig_variable_set = gather_variables(expr)
    orig_variable_list = [x.variable_info for x in orig_variable_set]
    orig_variable_list.sort(key=lambda x: x.variable_name)

    # check single invocation/separability properties
    if (not check_single_invocation_property(clauses, syn_ctx)):
        raise basetypes.ArgumentError('Spec:\n%s\nis not single-invocation!' %
                                      exprs.expression_to_string(expr))

    (intro_clauses, intro_vars) = _intro_new_universal_vars(clauses, syn_ctx,
                                                            synth_function_list[0])

    # ensure that the intro_vars at the head of the list
    # Arjun: Why? Most likely not necessary
    variable_list = [x.variable_info for x in intro_vars] + orig_variable_list
    num_vars = len(variable_list)
    for i in range(num_vars):
        variable_list[i].variable_eval_offset = i
    num_funs = len(synth_function_list)
    for i in range(num_funs):
        synth_function_list[i].synth_function_id = i

    if len(intro_clauses) == 1:
        canon_spec = intro_clauses[0]
    else:
        canon_spec = syn_ctx.make_function_expr('and', *intro_clauses)

    canon_clauses = []
    for ic in intro_clauses:
        if exprs.is_application_of(ic, 'or'):
            disjuncts = ic.children
        else:
            disjuncts = [ic]
        canon_clauses.append(disjuncts)

    return (variable_list, synth_function_list, canon_spec, canon_clauses, intro_vars)
5,347,546
def human_time(seconds, granularity=2):
    """Returns a human readable time string like "1 day, 2 hours"."""
    result = []
    for name, count in _INTERVALS:
        value = seconds // count
        if value:
            seconds -= value * count
            if value == 1:
                name = name.rstrip("s")
            result.append("{} {}".format(int(value), name))
        else:
            # Add a blank if we're in the middle of other values
            if len(result) > 0:
                result.append(None)
    if not result:
        if seconds < 1.0:
            return "%.2f seconds" % seconds
        else:
            if seconds == 1:
                return "1 second"
            else:
                return "%d seconds" % seconds
    return ", ".join([x for x in result[:granularity] if x is not None])
5,347,547
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    if args.data_path is not None:
        args.img_path = args.data_path + args.img_path

    # Prepare image.
    og = tf.image.decode_jpeg(tf.read_file(args.img_path), channels=3)

    # Compress and reconstruct if requested.
    if args.level > 0:
        compressor = get_model_for_level(args.level, latent=False)
        og = tf.cast(og, dtype=tf.uint8)
        og = tf.expand_dims(og, dim=0)
        og = compressor(og)[0]
        og = tf.squeeze(og)
        og.set_shape((None, None, 3))

    # Convert RGB to BGR.
    img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=og)
    img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)
    # Extract mean.
    img -= IMG_MEAN

    # Create network.
    net = DeepLabResNetModel({'data': tf.expand_dims(img, dim=0)}, is_training=False, num_classes=args.num_classes)

    # Which variables to load.
    restore_var = tf.global_variables()

    # Predictions.
    raw_output = net.layers['fc1_voc12']
    raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[0:2,])
    raw_output_up = tf.argmax(raw_output_up, dimension=3)
    pred = tf.expand_dims(raw_output_up, dim=3)

    # Set up TF session and initialize variables.
    if args.no_gpu:
        config = tf.ConfigProto(device_count={'GPU': 0})
    else:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)

    # Load weights.
    loader = tf.train.Saver(var_list=restore_var)
    load(loader, sess, args.model_weights)

    # Perform inference.
    preds = sess.run(pred)

    msk = decode_labels(preds, num_classes=args.num_classes)
    im = Image.fromarray(msk[0])
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    output_file = args.save_dir + Path(args.img_path).stem
    if args.level > 0:
        output_file += f'_mask_anchor2_{args.level}.png'
    else:
        output_file += '_mask_anchor1.png'
    im.save(output_file)

    if args.save_original:
        if args.level > 0:
            if og.dtype.is_floating:
                og = tf.round(og)
            if og.dtype != tf.uint8:
                og = tf.saturate_cast(og, tf.uint8)
        og = sess.run(og)
        og = Image.fromarray(og)
        og_file = args.save_dir + Path(args.img_path).stem
        if args.level > 0:
            og_file += f'_{args.level}.png'
        else:
            og_file += '.jpg'
        og.save(og_file)

    print(f'The output file has been saved to {args.save_dir}')
5,347,548
def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
    """Iterator returning genome build and references from Galaxy *.loc file.
    """
    if "column" in galaxy_dt:
        dbkey_i = galaxy_dt["column"].index("dbkey")
        path_i = galaxy_dt["column"].index("path")
    else:
        dbkey_i = None
    if os.path.exists(loc_file):
        with open(loc_file) as in_handle:
            for line in in_handle:
                if line.strip() and not line.startswith("#"):
                    parts = [x.strip() for x in line.strip().split("\t")]
                    # Detect and report spaces instead of tabs
                    if len(parts) == 1:
                        parts = [x.strip() for x in line.strip().split(" ") if x.strip()]
                        if len(parts) > 1:
                            raise IOError("Galaxy location file uses spaces instead of "
                                          "tabs to separate fields: %s" % loc_file)
                    if dbkey_i is not None and not need_remap:
                        dbkey = parts[dbkey_i]
                        cur_ref = parts[path_i]
                    else:
                        if parts[0] == "index":
                            parts = parts[1:]
                        dbkey = parts[0]
                        cur_ref = parts[-1]
                    yield (dbkey, cur_ref)
5,347,549
def cmorlet_wavelet(x, fs, freq_vct, n=6, normalization=True):
    """Perform the continuous wavelet (CWT) tranform using the complex Morlet wavelet.

    Parameters
    ----------
    x : 1D array with shape (n_samples) or
        2D array with shape (n_samples, n_channels)
    fs : Sampling frequency
        in Hz
    freq_vct : 1D array
        with frequencies to compute the CWT (Default = [1 : 1 : fs/2] )
    n : Number of cicles inside the Gaussian curve
        (Default 6)
    normalization : Scale each wavelet to have energy equal to 1
        (Default True)

    Returns
    -------
    wcoef : Complex wavelet coefficients
        2D array with shape [n_samples, n_freqs] if `x` is 1D array
        3D array with shape [n_samples, n_freqs, n_channels] if `x` is 2D array
    wfam : 2D array with shape [n_wavelet_samples, n_freqs] where each column
        corresponds to the a member of the wavelet family
    """
    # input 'x' as 2D matrix [samples, columns]
    try:
        x.shape[1]
    except IndexError:
        x = x[:, np.newaxis]

    # number of samples and number of channels
    n_samples, n_channels = x.shape

    # number of wavelets
    n_freqs = len(freq_vct)

    # number of samples for Wavetet family
    # This is equal to the number of samples needed to represent 2*n cycles
    # of a sine with frequency = fres(1)[Hz], sampled at fs [Hz].
    # This is done to ensure that every wavelet in the wavalet family will be
    # close to 0 in the negative and positive edges
    n_samples_wav = np.round((2*n/freq_vct[0])*fs)

    # The wavelet will be symmetrical around 0
    if np.mod(n_samples_wav, 2) == 0:  # even samples
        n_samples_wav = n_samples_wav + 1

    # create time vector for Wavelet family
    half = np.floor(n_samples_wav/2)
    time = np.arange(-half, half+1)/fs

    # initialize Wavelet family matrix
    wfam = np.zeros([len(time), n_freqs], dtype=complex)

    # for each frequency defined in FREQ, create its respective Wavelet
    for iwav in range(n_freqs):
        s = n/(2*np.pi*freq_vct[iwav])
        gaussian_win = np.exp((-time**2)/(2*s**2))
        sinwave = np.exp(2*np.pi*1j*freq_vct[iwav]*time)
        if normalization:
            # each wavelet has unit energy sum(abs(wavelet).^2)) = 1
            A = 1. / ((s**2) * np.pi) ** (1./4)
        else:
            A = 1.
        # Complex Morlet wavelet
        wfam[:, iwav] = A * sinwave * gaussian_win

    wcoef = np.zeros((n_samples, n_freqs, n_channels), dtype=complex)

    if n_channels == 1:
        # one channel
        tmp = conv_m(x, wfam, 'same')
        wcoef[:, :, 0] = tmp
    else:
        # convolution between signal X and the each Wavelt in the Wavelet family
        for i_channel in range(n_channels):
            x_tmp = x[:, i_channel]
            tmp = conv_m(x_tmp, wfam, 'same')
            wcoef[:, :, i_channel] = tmp

    return wcoef, wfam
5,347,550
def update(key=None, value=None, cache_type=None, file_path=None):
    """Set the cache that depends on the file access time

    :param key: the key for the cache
    :param value: the value in the cache
    :param cache_type: when we are using cache in different modules this param
        can protects from the overradings
    :param file_path: path to the file
    :return: True - if cache was setted successful
             False - cache wasn't setted successful
    """
    global __cache_store
    __was_set = False
    try:
        with __lock:
            if cache_type not in __cache_store:
                __cache_store[cache_type] = {}
            if key not in __cache_store[cache_type]:
                __cache_store[cache_type][key] = {}
            if file_path not in __cache_store[cache_type][key]:
                __cache_store[cache_type][key][file_path] = {
                    "access_time": None,
                    "value": None
                }
            if os.path.exists(file_path):
                statbuf = os.stat(file_path)
                __cache_store[cache_type][key][file_path]['access_time'] = statbuf.st_mtime
                __cache_store[cache_type][key][file_path]['value'] = value
                __was_set = True
    except TypeError:
        # if key has unhashable type
        pass
    except Exception as error:
        raise RuntimeError(" Can't set key: %s type: %s because %s " % (key, cache_type, error))
    return __was_set
5,347,551
def add_fake_planet(
    stack: np.ndarray,
    parang: np.ndarray,
    psf_template: np.ndarray,
    polar_position: Tuple[Quantity, Quantity],
    magnitude: float,
    extra_scaling: float,
    dit_stack: float,
    dit_psf_template: float,
    return_planet_positions: bool = False,
    interpolation: str = 'bilinear',
) -> Union[np.ndarray, Tuple[np.ndarray, List[Tuple[float, float]]]]:
    """
    Add a fake planet to the given ``stack`` which, when derotating and merging the stack,
    will show up at the given ``position``.

    This function can also be used to *remove* planets from a stack by setting the
    ``psf_scaling`` to a negative number.

    If you simply want to use this function to generate a fake signal stack, set ``stack``
    to all zeros, the ``magnitude`` to zero, both the ``dit_stack`` and ``dit_psf_template``
    to 1 (or any other non-zero number), and use the `extra_scaling` factor to linearly
    control the "brightness" of the injected planet.

    This function is essentially a simplified port of the corresponding PynPoint function
    :py:func:`pynpoint.util.analysis.fake_planet()`.

    Args:
        stack: A 3D numpy array of shape `(n_frames, width, height)` which contains the
            stack of images / frames into which we want to inject a fake planet.
        parang: A 1D numpy array of shape `(n_frames,)` that contains the respective
            parallactic angle for every frame in `stack`.
        psf_template: A 2D numpy array that contains the (centered) PSF template which
            will be used for the fake planet. This should *not* be normalized to `(0, 1]`
            if we want to work with actual astrophysical magnitudes for the contrast.
        polar_position: A tuple `(separation, angle)` which specifies the position at
            which the planet will show up after de-rotating with ``parang``.
            ``separation`` needs to be a ``Quantity`` that can be converted to pixel;
            `angle` needs to be a ``Quantity`` that can be converted to radian.
            Additionally, ``angle`` should be using *astronomical* polar coordinates,
            that is, 0 degrees will be "up" (= North), not "right". This function will
            internally add 90° to the angles to convert them to mathematical pilar
            coordinates.
        magnitude: The magnitude difference used to scale the PSF. Note: This is the
            contrast ratio in *magnitudes*, meaning that increasing this value by a
            factor of 5 will result in a planet that is 100 times brighter. In case you
            want to keep things linear, set this value to 0 and only use the
            ``psf_scaling`` parameter.
        extra_scaling: An additional scaling factor that is used for the PSF template.
            This number is simply multiplied with the PSF template, meaning that it
            changes the brightness linearly, not on a logarithmic scale. For example,
            you could use `-1` to add a *negative* planet to remove an actual planet in
            the data. This can also be used to incorporate an additional dimming factor
            due to a neutral density (ND) filter.
        dit_stack: The detector integration time of the frames in the ``stack`` (in
            seconds). Necessary to compute the correct scaling factor for the planet
            that we inject.
        dit_psf_template: The detector integration time of the ``psf_template`` (in
            seconds). Necessary to compute the correct scaling factor for the planet
            that we inject.
        return_planet_positions: Whether to return the (Cartesian) positions at which
            the fake planet was injected, as a 2D numpy array of shape `(n_frames, 2)`.
        interpolation: ``interpolation`` argument that is passed to
            :py:func:`scipy.ndimage.shift` that is used internally.

    Returns:
        A 3D numpy array of shape `(n_frames, width, height)` which contains the original
        ``stack`` into which a fake planet has been injected, as well as a list of tuples
        `(x, y)` that, for each frame, contain the position at which the fake planet has
        been added. If desired (i.e., if ``return_planet_positions`` is ``True``), the
        function also returns a 2D numpy array of shape `(n_frames, 2)` containing the
        Cartesian positions at which the fake planet has been injected.
    """
    # Make sure that the stack and the parallactic angles are compatible
    check_consistent_size(stack, parang)

    # Define shortcut for the number of frames and the frame_size
    n_frames, frame_size = stack.shape[0], (stack.shape[1], stack.shape[2])

    # Split the target planet position into separation and angles, convert
    # the quantities to pixels / convert to mathematical polar coordinates
    rho = polar_position[0].to('pixel').value
    phi = np.radians(polar_position[1].to('degree').value + 90 - parang)

    # Convert `magnitude` from logarithmic contrast to linear flux ratio
    flux_ratio = 10.0 ** (-magnitude / 2.5)

    # Compute scaling factor that is due to the different integration times
    # for the science images and the PSF template
    dit_scaling = dit_stack / dit_psf_template

    # Combine all scaling factors and scale the PSF template
    scaling_factor = flux_ratio * dit_scaling * extra_scaling
    psf_scaled = scaling_factor * np.copy(psf_template)

    # Make sure that the PSF has a compatible shape, that is, either crop or
    # pad the PSF template to the same spatial shape as the `stack`.
    psf_scaled = crop_or_pad(psf_scaled, frame_size)

    # Compute the shift for each frame
    x_shift = rho * np.cos(phi)
    y_shift = rho * np.sin(phi)

    # Initialize the "pure signal" stack (can use empty() here, because all
    # values will be overwritten and allocation should be slightly faster)
    signal_stack = np.empty_like(stack)

    # For each frame, move the scaled PSF template to the correct position
    # Note: We use mode='constant' instead of 'reflect' here (unlike PynPoint)
    # because the latter just does not seem to make a lot of sense?
    for i in range(n_frames):
        signal_stack[i] = shift_image(
            image=psf_scaled,
            offset=(float(x_shift[i]), float(y_shift[i])),
            interpolation=interpolation,
            mode='constant',
        )

    # Add the planet stack to the original input stack
    output_stack = stack + signal_stack

    # Either return only the output stack, or the output stack and
    # the planet positions
    if return_planet_positions:
        center = get_center(frame_size)
        planet_positions = np.column_stack(
            (x_shift + center[0], y_shift + center[1])
        )
        return output_stack, planet_positions
    return np.array(output_stack)
5,347,552
def base_subs_test():
    """Base substitution."""
    seq = 'ACGT'
    sub_pts = [0, 1, 2, 3]
    t_mat = [[0.0, 0.3, 0.3, 0.3],
             [0.3, 0.0, 0.3, 0.3],
             [0.3, 0.3, 0.0, 0.3],
             [0.3, 0.3, 0.3, 0.0]]
    rng = MockRng([0.5, 0.5, 0.5, 0.5])  # -> G G C C
    base_subbed = mitty.lib.util.base_subs(seq, sub_pts, t_mat, rng)
    assert ['G', 'G', 'C', 'C'] == base_subbed, base_subbed
5,347,553
def update_cluster_cli(args: Namespace):
    """Updates the cluster configuration of the databricks instance defined in the current profile

    :param Namespace args: The arguments from the cli
    :return:
    """
    # Set the name
    cluster_name = args.name.lower()

    # Get the base profile
    profile, base_config = extract_profile(args)

    # Get the workspace groups
    groups = get_groups(profile)

    # Get the existing cluster
    clusters = extract_clusters(profile)

    # Get the clusters matching the desired name
    matching_clusters = [
        cluster
        for cluster in clusters
        if cluster['name'].lower() == cluster_name
    ]

    # Create the cluster configuration
    cluster_config = create_config(cluster_name, profile)

    # Create the cluster
    if not matching_clusters:
        cluster_id = create_cluster(profile, cluster_config)
        cluster_status = 'PENDIING'

        # Terminate the newly started cluster
        if not args.r:
            terminate_cluster(cluster_id, cluster_name, profile)
            cluster_status = 'TERMINATED'

        # Add the new cluster to the configuration
        matching_clusters.append(
            {
                'name': cluster_name,
                'cluster_id': cluster_id,
                'status': cluster_status
            }
        )

    # Set access groups
    access_groups = {
        f'cluster-{cluster_name}-manage': 'CAN_MANAGE',
        f'cluster-{cluster_name}-restart': 'CAN_RESTART',
        f'cluster-{cluster_name}-attach': 'CAN_ATTACH_TO',
    }

    # Filter and create the missing groups
    missing_groups = [group for group in access_groups if group not in groups]
    if missing_groups:
        create_groups(missing_groups, profile)

    # Update the clusters
    for cluster in matching_clusters:
        set_acls(access_groups, cluster['cluster_id'], base_config)

        # Update the cluster configuration
        if cluster['status'] == 'TERMINATED' and args.e:
            cluster_config['cluster_id'] = cluster['cluster_id']
            edit_cluster(profile, cluster_config)

    return
5,347,554
def migrate_all(src, dst, replace=True, nprocs=1):
    """Migrates entire dataset from source host to destination host using multiprocessing"""
    srchost, srcport, _ = parse_uri(src)
    srcr = redis.StrictRedis(host=srchost, port=srcport, charset='utf8')
    keyspace = srcr.info('keyspace')
    freeze_support()  # for Windows support
    pool = Pool(processes=min(len(keyspace.keys()), nprocs))
    pool.starmap(migrate, [(src, dst, int(db[2:]), replace, i)
                           for i, db in enumerate(keyspace.keys())])
    print('\n' * max(0, len(keyspace.keys())-1))
5,347,555
def make_cache(channel, subdir):
    """Reads and/or generates the cachefile and returns the cache"""
    # load cache
    channel_name = _get_channel_name(channel)
    cachefile = f"{channel_name}.{subdir}.cache.json"
    if os.path.exists(cachefile):
        print(f"Loading cache from {cachefile}")
        with open(cachefile) as f:
            cache = json.load(f)
    else:
        cache = {}
    # load repodata
    pkgs = _get_repodata_packages(channel, subdir)
    # add packages to cache
    needed = set(pkgs.keys()) - set(cache.keys())
    for i, artifact in enumerate(tqdm.tqdm(needed)):
        _add_artifact_to_cache(cache, pkgs[artifact], channel, subdir, artifact)
        if i % 100 == 99:
            # save the state occasionally
            _save_cache(cache, cachefile, display=False)
    _save_cache(cache, cachefile)
    return cache
5,347,556
def _BD_from_Av_for_dereddening(line_lambdas, line_fluxes, A_v):
    """
    Find the de-reddened Balmer decrement (BD) that would arise from "removing" an
    extinction of A_v (magnitudes) from the line_fluxes.
    line_lambdas, line_fluxes: As in the function "deredden".
    A_v: The extinction (magnitudes), as a scalar or array of extinction values.
    Returns the Balmer decrement dereddened_BD (F_Halpha / F_Hbeta), as a float or
    array of floats with the same shape as A_v.
    """
    assert np.all(np.asarray(A_v) >= 0)
    initial_BD = _find_BD(line_lambdas, line_fluxes)

    # Calculate the Balmer decrement (BD) that would result from "removing" an
    # extinction of A_v, using an inverted form of Equation A14 in Vogt13.
    dereddened_BD = initial_BD / 10**(A_v / 8.55)

    return dereddened_BD
5,347,557
def otherEnd(contours, top, limit):
    """
    top and end are too close; find another point near the top that is farthest from top
    """
    tt = (0, 9999)
    for li in contours:
        for pp in li:
            p = pp[0]
            if limit(p[0]) and top[1] - p[1] < 15 and abs(top[0] - p[0]) > 50 and p[1] < tt[1]:
                tt = p
    return tt
5,347,558
def plivo_webhook(event, context):
    """
    Receives SMS messages and forwards them to telegram
    """
    CHAT_ID = int(os.environ['CHAT_ID'])
    bot = configure_telegram()
    logger.info('Plivo Event: {}'.format(event))
    try:
        body = parse_plivo_msg(event)
    except AssertionError as e:
        logger.info(e)
        return ERROR_RESPONSE
    sender = body['From']
    msg = body['Text']
    text = "{}: {}".format(sender, msg)
    bot.send_message(chat_id=CHAT_ID, text=text)
    logger.info('Message sent')
    return OK_RESPONSE
5,347,559
def convert(input_format, output_format, input_path, output_path):
    """
    Run: qemu-img -f <input-format> -O <output-format> <input-path> <output-path>
    """
    print(f"qemu-img -f {input_format} -O {output_format} {input_path} {output_path}")
    if input_format not in FORMATS or output_format not in FORMATS:
        # f-prefix added so FORMATS is actually interpolated into the message
        error(f"ERROR - Invalid format provided. Valid formats: {FORMATS}", exit=True)
    if not Path(input_path).is_file():
        error(f"ERROR - File not found: {input_path}", exit=True)
    qemu_img.convert(input_format, output_format, input_path, output_path)
5,347,560
def test_award_update_contract_txn_with_list():
    """Test optional parameter to update specific awards from txn contract."""
    awards = mommy.make('awards.Award', _quantity=5)
    txn = mommy.make('awards.TransactionNormalized', award=awards[0])
    mommy.make(
        'awards.TransactionFPDS',
        transaction=txn,
        base_and_all_options_value=1000,
        base_exercised_options_val=100
    )
    # single award is updated
    count = update_contract_awards((awards[0].id,))
    awards[0].refresh_from_db()
    assert count == 1
    assert awards[0].base_and_all_options_value == 1000

    # update multiple awards
    txn1 = mommy.make('awards.TransactionNormalized', award=awards[1])
    mommy.make(
        'awards.TransactionFPDS',
        transaction=txn1,
        base_and_all_options_value=4000,
        base_exercised_options_val=400
    )
    txn2 = mommy.make('awards.TransactionNormalized', award=awards[2])
    mommy.make(
        'awards.TransactionFPDS',
        transaction=txn2,
        base_and_all_options_value=5000,
        base_exercised_options_val=500
    )
    # multiple awards updated
    count = update_contract_awards((awards[1].id, awards[2].id))
    awards[1].refresh_from_db()
    awards[2].refresh_from_db()
    assert count == 2
    assert awards[1].base_and_all_options_value == 4000
    assert awards[1].base_exercised_options_val == 400
    assert awards[2].base_and_all_options_value == 5000
    assert awards[2].base_exercised_options_val == 500
5,347,561
async def reset(dut, time=20): """ Reset the design """ dut.reset = 1 await Timer(time, units="ns") await RisingEdge(dut.clk) dut.reset = 0 await RisingEdge(dut.clk)
5,347,562
def castep_phonon_prerelax(computer, calc_doc, seed): """ Run a singleshot geometry optimisation before an SCF-style calculation. This is typically used to ensure phonon calculations start successfully. The phonon calculation will then be restarted from the .check file produced here. Parameters: computer (:obj:`ComputeTask`): the object that will be calling CASTEP. calc_doc (dict): the structure to run on. seed (str): root filename of structure. """ from matador.workflows.castep.common import castep_prerelax LOG.info('Performing CASTEP phonon pre-relax...') required = ["write_checkpoint"] forbidden = ['phonon_fine_kpoint_list', 'phonon_fine_kpoint_path', 'phonon_fine_kpoint_mp_spacing', 'phonon_fine_kpoint_path_spacing'] return castep_prerelax( computer, calc_doc, seed, required_keys=required, forbidden_keys=forbidden )
5,347,563
def get_external_links(soup):
    """Retrieve the different links from a `Lyric Wiki` page.

    The links returned can be found in the `External Links` page section,
    and usually reference other platforms (like Last.fm, Amazon, iTunes etc.).

    Args:
        soup (bs4.element.Tag): connection to the `Lyric Wiki` page.

    Returns:
        dict

    Examples::
        >>> # Import packages
        >>> import bs4  # for web scraping
        >>> import urllib.request  # to connect

        >>> # Set Up: connect to a lyric wiki page
        >>> USER = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
        >>> HEADERS = {'User-Agent': USER}
        >>> URL = 'https://lyrics.fandom.com/wiki/London_Grammar:Who_Am_I'
        >>> req = urllib.request.Request(URL, headers=HEADERS)
        >>> page = urllib.request.urlopen(req)
        >>> soup = bs4.BeautifulSoup(page, 'lxml')

        >>> # Retrieve links from the page
        >>> get_external_links(soup)
        {'Amazon': ['https://www.amazon.com/exec/obidos/redirect?link_code=ur2&tag=wikia-20&camp=1789&creative=9325&path=https%3A%2F%2Fwww.amazon.com%2Fdp%2FB00J0QJ84E'],
         'Last.fm': ['https://www.last.fm/music/London+Grammar',
                     'https://www.last.fm/music/London+Grammar/If+You+Wait'],
         'iTunes': ['https://itunes.apple.com/us/album/695805771'],
         'AllMusic': ['https://www.allmusic.com/album/mw0002559862'],
         'Discogs': ['http://www.discogs.com/master/595953'],
         'MusicBrainz': ['https://musicbrainz.org/release-group/dbf36a9a-df02-41c4-8fa9-5afe599960b0'],
         'Spotify': ['https://open.spotify.com/album/0YTj3vyjZmlfp16S2XGo50']}
    """
    # Only add links from this set. Others are not relevant.
    links_keys = ['Amazon', 'Last.fm', 'iTunes', 'AllMusic', 'Discogs', 'MusicBrainz',
                  'Spotify', 'Bandcamp', 'Wikipedia', 'Pandora', 'Hype Machine']
    links = {}
    # Scrape links from a page
    for external_tag in scrape_external_links(soup):
        # Get the respective link / href
        for link_a in external_tag.findAll('a', attrs={'class', 'external text'}):
            # Add it to a dict
            key = external_tag.text.split(':')[0].strip()
            if key in links_keys:
                links.setdefault(key, [])
                links[key].append(link_a.get('href'))

    return links
5,347,564
def gcc(): """Return the current container, that is the widget holding the figure and all the control widgets, buttons etc.""" gcf() # make sure we have something.. return current.container
5,347,565
def axis_val_hud(*args): """ Toggle the presence of a heads-up-display (HUD) for viewing axes in Maya. :param args: :return: """ robots = get_robot_roots(1) if not robots: print 'No Robots in Scene' if pm.headsUpDisplay('a1_hud', exists=True): for i in range(6): pm.headsUpDisplay('a{}_hud'.format(i + 1), remove=True) return # Check if the HUD exists already if pm.headsUpDisplay('a1_hud', exists=True): # If so, remove it for i in range(6): pm.headsUpDisplay('a{}_hud'.format(i + 1), remove=True) # Turn Limit Meter off for robot in robots: limit_meter_ctrl_path = format_path('{0}|{1}robot_GRP|{1}limitMeter_CTRL', robot) limit_meter_grp_path = format_path('{0}|{1}robot_GRP|{1}limitMeter_GRP', robot) pm.setAttr(limit_meter_ctrl_path + '.v', 0) pm.setAttr(limit_meter_grp_path + '.v', 0) return else: # If not, create it for i in range(6): pm.headsUpDisplay('a{}_hud'.format(i + 1), section=5, block=7 - i, blockSize='small', label='A{}'.format(i + 1), labelFontSize='large', dataWidth=30, command=pm.Callback(get_axis_val, i + 1), # event='timeChanged') attachToRefresh=True) # Turn Limit Meter on for robot in robots: limit_meter_ctrl_path = format_path('{0}|{1}robot_GRP|{1}limitMeter_CTRL', robot) limit_meter_grp_path = format_path('{0}|{1}robot_GRP|{1}limitMeter_GRP', robot) pm.setAttr(limit_meter_ctrl_path + '.v', 1) pm.setAttr(limit_meter_grp_path + '.v', 1)
5,347,566
def preprocessing(string):
    """Helper function to remove punctuation from a string."""
    string = string.replace(',', ' ').replace('.', ' ')
    string = string.replace('(', '').replace(')', '')
    words = string.split(' ')
    return words
5,347,567
def firetime(tstart, Tfire): """ Highly accurate sub-millisecond absolute timing based on GPSDO 1PPS and camera fire feedback. Right now we have some piecemeal methods to do this, and it's time to make it industrial strength code. """ raise NotImplementedError("Yes this is a priority, would you like to volunteer?")
5,347,568
def add_input_arguments(argument_parser_object): """Adds input args for this script to `argparse.ArgumentParser` object. :param argument_parser_object: `argparse.ArgumentParser` object, which may or may not already contain input args. :return: argument_parser_object: Same as input object, but with new input args added. """ argument_parser_object.add_argument( '--' + TRACKING_DIR_INPUT_ARG, type=str, required=False, default=DEFAULT_TRACKING_DIR_NAME, help=TRACKING_DIR_HELP_STRING) argument_parser_object.add_argument( '--' + TRACKING_SCALE_INPUT_ARG, type=int, required=False, default=echo_top_tracking.DUMMY_TRACKING_SCALE_METRES2, help=TRACKING_SCALE_HELP_STRING) argument_parser_object.add_argument( '--' + GRIDRAD_DIR_INPUT_ARG, type=str, required=False, default=DEFAULT_GRIDRAD_DIR_NAME, help=GRIDRAD_DIR_HELP_STRING) argument_parser_object.add_argument( '--' + OUTPUT_DIR_INPUT_ARG, type=str, required=False, default=DEFAULT_OUTPUT_DIR_NAME, help=OUTPUT_DIR_HELP_STRING) return argument_parser_object
5,347,569
def fullUnitSphere(res): """Generates a unit sphere in the same way as :func:`unitSphere`, but returns all vertices, instead of the unique vertices and an index array. :arg res: Resolution - the number of angles to sample. :returns: A ``numpy.float32`` array of size ``(4 * (res - 1)**2, 3)`` containing the ``(x, y, z)`` vertices which can be used to draw a unit sphere (using the ``GL_QUADS`` primitive type). """ u = np.linspace(-np.pi / 2, np.pi / 2, res, dtype=np.float32) v = np.linspace(-np.pi, np.pi, res, dtype=np.float32) cosu = np.cos(u) cosv = np.cos(v) sinu = np.sin(u) sinv = np.sin(v) vertices = np.zeros(((res - 1) * (res - 1) * 4, 3), dtype=np.float32) cucv = np.outer(cosu[:-1], cosv[:-1]).flatten() cusv = np.outer(cosu[:-1], sinv[:-1]).flatten() cu1cv = np.outer(cosu[1:], cosv[:-1]).flatten() cu1sv = np.outer(cosu[1:], sinv[:-1]).flatten() cu1cv1 = np.outer(cosu[1:], cosv[1:]) .flatten() cu1sv1 = np.outer(cosu[1:], sinv[1:]) .flatten() cucv1 = np.outer(cosu[:-1], cosv[1:]) .flatten() cusv1 = np.outer(cosu[:-1], sinv[1:]) .flatten() su = np.repeat(sinu[:-1], res - 1) s1u = np.repeat(sinu[1:], res - 1) vertices.T[:, ::4] = [cucv, cusv, su] vertices.T[:, 1::4] = [cu1cv, cu1sv, s1u] vertices.T[:, 2::4] = [cu1cv1, cu1sv1, s1u] vertices.T[:, 3::4] = [cucv1, cusv1, su] return vertices
5,347,570
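A minimal sanity check for the quad-sphere generator above (assuming fullUnitSphere is importable from its module): every generated vertex should lie on the unit sphere, and the vertex count should be 4 * (res - 1)**2.

import numpy as np
# from the module defining fullUnitSphere  (import path depends on the project)

verts = fullUnitSphere(10)
assert verts.shape == (4 * 9 * 9, 3)
assert np.allclose(np.linalg.norm(verts, axis=1), 1.0, atol=1e-5)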
def prompt_for_word_removal(words_to_ignore=None):
    """
    Prompts the user for words that should be ignored in keyword extraction.

    Parameters
    ----------
    words_to_ignore : str or list
        Words that should not be included in the output.

    Returns
    -------
    words_to_ignore, words_added : list, bool
        A new list of words to ignore and a boolean indicating if words have been added.
    """
    if isinstance(words_to_ignore, str):
        words_to_ignore = [words_to_ignore]
    words_to_ignore = [w.replace("'", "") for w in words_to_ignore]

    words_added = False  # whether to run the models again
    more_words = True
    while more_words:
        more_words = input("\nShould words be removed [y/n]? ")
        if more_words == "y":
            new_words_to_ignore = input("Type or copy word(s) to be removed: ")
            # Remove commas if the user has used them to separate words,
            # as well as apostrophes.
            new_words_to_ignore = [
                char for char in new_words_to_ignore if char not in [",", "'"]
            ]
            new_words_to_ignore = "".join(new_words_to_ignore)

            if " " in new_words_to_ignore:
                new_words_to_ignore = new_words_to_ignore.split(" ")
            elif isinstance(new_words_to_ignore, str):
                new_words_to_ignore = [new_words_to_ignore]

            words_to_ignore += new_words_to_ignore
            words_added = True  # we need to run the models again

            more_words = False

        elif more_words == "n":
            more_words = False

        else:
            print("Invalid input")

    return words_to_ignore, words_added
5,347,571
def wait_for_image_property(identifier, property, cmp_func, wait=20,
                            maxtries=10):
    """Wait for an image to have a given property.

    Raises TimeoutError on failure.

    :param identifier: the image identifier
    :param property: the name of the property
    :param cmp_func: predicate function accepting current value of the property
    :param wait: time (in seconds) between polls
    :param maxtries: maximum number of attempts
    :returns: True
    """
    logger.info('Waiting for {identifier} to be {property} using {cmp_func}'
                .format(**locals()))
    for _ in xrange(maxtries):
        output = image_show(identifier)
        current = openstack_parse_show(output, property)
        if cmp_func(current):
            return True
        else:
            time.sleep(wait)
    msg = 'Timeout while waiting for image {identifier} {property} using {fn}'\
        .format(identifier=identifier, property=property, fn=cmp_func)
    logger.info(msg)
    raise TimeoutError(msg)
5,347,572
def make_sample_ensemble_seg_plot(model2, model3, sample_filenames, test_samples_fig, flag='binary'):
    """
    "make_sample_ensemble_seg_plot(model2, model3, sample_filenames, test_samples_fig, flag='binary')"
    This function uses two trained models to estimate the label image from each input image.
    It then uses a KL score to determine which one to return
    and returns both images and labels as a list, as well as a list of which model's output is returned
    INPUTS:
        * model: trained and compiled keras model
        * sample_filenames: [list] of strings
        * test_samples_fig [string]: filename to print figure to
        * flag [string]: either 'binary' or 'multiclass'
    OPTIONAL INPUTS: None
    GLOBAL INPUTS: None
    OUTPUTS:
        * imgs: [list] of images
        * lbls: [list] of label images
        * model_num: [list] of integers indicating which model's output was returned based on CRF KL divergence
    """
    plt.figure(figsize=(16,16))

    imgs = []
    lbls = []
    model_num = []
    for counter,f in enumerate(sample_filenames):
        image = seg_file2tensor(f)/255
        est_label1 = model2.predict(tf.expand_dims(image, 0) , batch_size=1).squeeze()
        if flag == 'binary':
            est_label1[est_label1>0.5] = 1
            est_label1 = (est_label1*255).astype(np.uint8)
        else:
            est_label1 = tf.argmax(est_label1, axis=-1)

        est_label2 = model3.predict(tf.expand_dims(image, 0) , batch_size=1).squeeze()
        if flag == 'binary':
            est_label2[est_label2>0.5] = 1
            est_label2 = (est_label2*255).astype(np.uint8)
        else:
            est_label2 = tf.argmax(est_label2, axis=-1)

        label = est_label1.numpy().astype('int')
        img = (image.numpy()*255).astype(np.uint8)
        est_labelA, kl1 = crf_refine(label, img )

        label = est_label2.numpy().astype('int')
        est_labelB, kl2 = crf_refine(label, img )
        del label

        # plt.subplot(221); plt.imshow(image); plt.imshow(est_label1, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 1 estimate', fontsize=6)
        # plt.subplot(222); plt.imshow(image); plt.imshow(est_label2, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 2 estimate', fontsize=6)
        # plt.subplot(223); plt.imshow(image); plt.imshow(est_labelA, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 1 CRF estimate ('+str(-np.log(-kl1))[:7]+')', fontsize=6)
        # plt.subplot(224); plt.imshow(image); plt.imshow(est_labelB, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3); plt.axis('off'); plt.title('Model 2 CRF estimate ('+str(-np.log(-kl2))[:7]+')', fontsize=6)
        # plt.savefig('crf-example'+str(counter)+'.png', dpi=600, bbox_inches='tight'); plt.close('all')
        #
        if kl1<kl2:
            est_label = est_labelA.copy()
            model_num.append(1)
        else:
            est_label = est_labelB.copy()
            model_num.append(2)

        if flag == 'binary':
            plt.subplot(6,4,counter+1)
        else:
            plt.subplot(4,4,counter+1)
        name = sample_filenames[counter].split(os.sep)[-1].split('_')[0]
        plt.title(name, fontsize=10)
        plt.imshow(image)
        if flag == 'binary':
            plt.imshow(est_label, alpha=0.5, cmap=plt.cm.gray, vmin=0, vmax=1)
        else:
            plt.imshow(est_label, alpha=0.5, cmap=plt.cm.bwr, vmin=0, vmax=3)

        plt.axis('off')
        imgs.append(image)
        lbls.append(est_label)

    # plt.show()
    plt.savefig(test_samples_fig, dpi=200, bbox_inches='tight')
    plt.close('all')
    return imgs, lbls, model_num
5,347,573
def mock_requests_get_json_twice(mocker: MockerFixture) -> MagicMock: """Mock two pages of results returned from the parliament open data API.""" mock: MagicMock = mocker.patch("requests.get") mock.return_value.__enter__.return_value.json.side_effect = [ { "columnNames": ["column1", "column2"], "rowData": [["Lorem ipsum", "dolor sit amet"]], "hasMore": True, }, { "columnNames": ["column1", "column2"], "rowData": [["eripuit principes intellegam", "eos id"]], "hasMore": False, }, ] return mock
5,347,574
def _get_rating_accuracy_stats(population, ratings):
    """
    Calculate how accurate our ratings were.

    :param population: iterable of the employees' true values
    :param ratings: iterable of the ratings given to each employee, in the same order
    :return: tuple of (num_underestimates, num_correct, num_overestimates)
    """
    num_overestimates = 0
    num_underestimates = 0
    num_correct = 0
    for employee, rating in zip(population, ratings):
        if rating < employee:
            num_underestimates += 1
        elif rating > employee:
            num_overestimates += 1
        else:
            num_correct += 1

    return num_underestimates, num_correct, num_overestimates
5,347,575
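For example (assuming the helper is importable), ratings of [4, 5, 1] against true values [3, 5, 2] give one underestimate, one exact match and one overestimate:

population = [3, 5, 2]  # true values
ratings = [4, 5, 1]     # estimates
assert _get_rating_accuracy_stats(population, ratings) == (1, 1, 1)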
def genb58seed(entropy=None):
    """
    Generate a random Family Seed for Ripple. (Private Key)

    entropy = String of any random data. Please ensure high entropy.

    ## Note: ecdsa library's randrange() uses os.urandom() to get its entropy.
    ## This should be secure enough... but just in case, I added the ability
    ## to include your own entropy in addition.
    """
    if entropy is None:
        entropy = int2data(ecdsa.util.randrange(2 ** 128), 16)
    else:
        entropy = hashlib.sha256(entropy + int2data(ecdsa.util.randrange(2 ** 128), 16)).digest()[:16]

    b58seed = data_to_address(entropy, 33)

    return b58seed
5,347,576
def convert_nhwc_to_nchw(data: np.array) -> np.array: """Convert data to NCHW.""" return np.transpose(data, [0, 3, 1, 2])
5,347,577
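Usage is a simple axis permutation; for a batch of four 224x224 RGB images (assuming the function is in scope):

import numpy as np

batch_nhwc = np.zeros((4, 224, 224, 3), dtype=np.float32)
batch_nchw = convert_nhwc_to_nchw(batch_nhwc)
assert batch_nchw.shape == (4, 3, 224, 224)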
def get_mfcc_features(wave_data: pd.Series, n_mfcc):
    """Compute MFCC-based features: each waveform is z-score normalized and padded,
    then n_mfcc MFCCs plus their first- and second-order differences are computed
    and flattened into a single feature vector per sample."""
    x = wave_data.apply(lambda d: (d-np.mean(d))/(np.std(d)))
    # x = wave_data
    x, max_length = utils.padding_to_max(x)
    features = []
    for i in range(x.shape[0]):
        t1 = mfcc(x[i], sr=16000, n_mfcc=n_mfcc)
        t2 = utils.diff(t1, axis=0)
        t3 = utils.diff(t1, axis=0, delta=2)
        t = np.concatenate([t1.T, t2.T, t3.T], axis=1).flatten()
        features.append(t)
    return np.array(features)
5,347,578
def project_screen_group():
    """ Manage the screens of a specific project. """
5,347,579
def _upload_to_s3(md5: str, local_file_path: str, metadata: Dict[str, str]) -> None:
    """Upload the binary contents to S3 along with the given object metadata.

    Args:
        md5: CarbonBlack MD5 key (used as the S3 object key).
        local_file_path: Path to the file to upload.
        metadata: Binary metadata to attach to the S3 object.

    Returns:
        None. The object is stored under the S3 key 'carbonblack/<md5>'.
    """
    s3_object_key = 'carbonblack/{}'.format(md5)
    LOGGER.info('Uploading to S3 with key %s', s3_object_key)
    with open(local_file_path, 'rb') as target_file:
        S3_BUCKET.put_object(Body=target_file, Key=s3_object_key, Metadata=metadata)
5,347,580
def run( tag, env, parallel, runner, is_async, node_names, to_nodes, from_nodes, from_inputs, load_version, pipeline, config, params, ): """Run the pipeline.""" if parallel and runner: raise KedroCliError( "Both --parallel and --runner options cannot be used together. " "Please use either --parallel or --runner." ) runner = runner or "SequentialRunner" if parallel: runner = "ParallelRunner" runner_class = load_obj(runner, "kedro.runner") tag = _get_values_as_tuple(tag) if tag else tag node_names = _get_values_as_tuple(node_names) if node_names else node_names context = load_context(Path.cwd(), env=env, extra_params=params) context.run( tags=tag, runner=runner_class(is_async=is_async), node_names=node_names, from_nodes=from_nodes, to_nodes=to_nodes, from_inputs=from_inputs, load_versions=load_version, pipeline_name=pipeline, )
5,347,581
def test_repr_diagnostic_task(diagnostic_task): """Test printing a diagnostic task.""" diagnostic_task.name = 'diag_1/script_1' result = str(diagnostic_task) print(result) reference = textwrap.dedent(""" DiagnosticTask: diag_1/script_1 script: /some/where/esmvaltool/diag_scripts/test.py settings: {'profile_diagnostic': False, 'run_dir': '/output/run'} ancestors: None """) assert result.strip() == reference.strip()
5,347,582
def download_instance_func(instance_id): """Download a DICOM Instance as DCM""" file_bytes = client.orthanc.download_instance_dicom(instance_id) return flask.send_file(BytesIO(file_bytes), mimetype='application/dicom', as_attachment=True, attachment_filename=f'{instance_id}.dcm')
5,347,583
def test_cloud_vendor_azure_fetch_error(): """ Test failure fetching Azure data with HTTP """ tempdir = tempfile.mkdtemp() prefixes = Prefixes(cache_directory=tempdir) vendor = prefixes.get_vendor('azure') with patch('netlookup.network_sets.azure.AZURE_SERVICES_URL', INVALID_URL): with pytest.raises(NetworkError): vendor.fetch()
5,347,584
def generate_random_initial_params(n_qubits, n_layers=1, topology='all', min_val=0., max_val=1., n_par=0, seed=None): """Generate random parameters for the QCBM circuit (iontrap ansatz). Args: n_qubits (int): number of qubits in the circuit. n_layers (int): number of entangling layers in the circuit. If n_layers=-1, you can specify a custom number of parameters (see below). topology (str): describes topology of qubits connectivity. min_val (float): minimum parameter value. max_val (float): maximum parameter value. n_par (int): specifies number of parameters to be generated in case of incomplete layers (i.e. n_layers=-1). seed (int): initialize random generator Returns: numpy.array: the generated parameters, stored in a 1D array. """ gen = np.random.RandomState(seed) assert(topology == 'all') n_params_layer_zero = 2*n_qubits n_params_per_layer = int((n_qubits*(n_qubits-1))/2) if n_layers==-1: n_params=n_par else: assert(n_layers>0) if n_par!=0: raise ValueError("If n_layers is specified, n_par is automatically computed.") n_params = n_params_layer_zero+n_layers*n_params_per_layer params = gen.uniform(min_val, max_val, n_params) return(params)
5,347,585
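A small usage sketch (assuming the function is importable): with 4 qubits and 2 layers the parameter count is 2*4 + 2*(4*3/2) = 20, and a fixed seed makes the draw reproducible.

params = generate_random_initial_params(n_qubits=4, n_layers=2, seed=42)
assert params.shape == (20,)
assert (params >= 0.).all() and (params <= 1.).all()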
def locate_alien(): """Locate an alien and add it to the DB Locating an alien in this implementation is as simple as naming it using a random uuid """ # Generate a name for the alien alien_name = base64.urlsafe_b64encode(uuid.uuid4().bytes) # Get DB session session = db.get_session() # Add to DB a = models.Alien() a.name = alien_name a.created = datetime.utcnow() session.add(a) session.commit()
5,347,586
def number_of_friends(user): """How many friends does this user have?""" user_id = user["id"] friend_ids = friendships[user_id] return len(friend_ids)
5,347,587
def get_serializer_class(format=None): """Convenience function returns serializer or raises SerializerNotFound.""" if not format: serializer = BaseSerializer() elif format == 'json-ld': serializer = JsonLDSerializer() elif format == 'json': serializer = JsonSerializer() else: raise SerializerNotFound(format) return serializer
5,347,588
def get_or_create_mpc_section( mp_controls: "MpConfigControls", section: str, subkey: Optional[str] = None # type: ignore ) -> Any: """ Return (and create if it doesn't exist) a settings section. Parameters ---------- mp_controls : MpConfigControls The MP Config database. section : str The section name (top level settings item) subkey : Optional[str], optional Optional subkey to create, by default None Returns ------- Any The settings at that section[subkey] location. """ curr_section = mp_controls.get_value(section) if curr_section is None: mp_controls.set_value(section, {}) curr_section = mp_controls.get_value(section) if subkey and subkey not in curr_section: mp_controls.set_value(f"{section}.{subkey}", {}) return mp_controls.get_value(f"{section}.{subkey}") return mp_controls.get_value(section)
5,347,589
def get_sql_table_headers(csv_dict_reader: csv.DictReader) -> str: """ This takes in a csv dictionary reader type, and returns a list of the headings needed to make a table """ column_names = [] for row in csv_dict_reader: for column in row: column_names.append('{} {} '.format(column, get_sql_type(row[column]))) return column_names
5,347,590
def greater_than(val1, val2): """Perform inequality check on two unsigned 32-bit numbers (val1 > val2)""" myStr = flip_string(val1) + flip_string(val2) call(MATH_32BIT_GREATER_THAN,myStr) return ord(myStr[0]) == 1
5,347,591
def upsampling_2x_blocks(n_speakers, speaker_dim, target_channels, dropout): """Return a list of Layers that upsamples the input by 2 times in time dimension. Args: n_speakers (int): number of speakers of the Conv1DGLU layers used. speaker_dim (int): speaker embedding size of the Conv1DGLU layers used. target_channels (int): channels of the input and the output.(the list of layers does not change the number of channels.) dropout (float): dropout probability. Returns: List[Layer]: upsampling layers. """ upsampling_convolutions = [ Conv1DTranspose( target_channels, target_channels, 2, stride=2, param_attr=I.Normal(scale=np.sqrt(1. / (2 * target_channels)))), Conv1DGLU( n_speakers, speaker_dim, target_channels, target_channels, 3, dilation=1, std_mul=1., dropout=dropout), Conv1DGLU( n_speakers, speaker_dim, target_channels, target_channels, 3, dilation=3, std_mul=4., dropout=dropout) ] return upsampling_convolutions
5,347,592
def actor_path(data, actor_id_1, goal_test_function):
    """
    Creates the shortest possible path from the given actor ID to any actor that
    satisfies the goal test function. Returns a list containing actor IDs. If no
    actors satisfy the goal condition, returns None.
    """
    agenda = {actor_id_1,}
    seen = {actor_id_1,}
    relations = {}
    map_of_actors = mapped_actors(data)

    while agenda:
        # Get the children of the parent
        next_agenda = set()
        for i in agenda:
            for j in map_of_actors[i]:
                if j not in seen and j not in agenda:
                    next_agenda.add(j)
                    # Map child to parent
                    relations[j] = i

        # If actor satisfies function condition, return constructed path
        for id_ in agenda:
            if goal_test_function(id_):
                final_path = construct_path(relations, id_, actor_id_1)
                return final_path

        for next_ in agenda:
            if next_ not in seen:
                seen.add(next_)

        # Update agenda to next bacon number/layer
        agenda = next_agenda

    # No path exists
    return None
5,347,593
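The function above relies on the project's mapped_actors/construct_path helpers; the core idea is an ordinary breadth-first search that records each node's parent and rebuilds the path once a goal node is found. A self-contained sketch of that idea on a toy adjacency dict:

from collections import deque

def bfs_path(adjacency, start, is_goal):
    """Return the shortest path from start to any node satisfying is_goal."""
    parents = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if is_goal(node):
            # walk the parent pointers back to the start, then reverse
            path = []
            while node is not None:
                path.append(node)
                node = parents[node]
            return path[::-1]
        for neighbour in adjacency.get(node, ()):
            if neighbour not in parents:
                parents[neighbour] = node
                queue.append(neighbour)
    return None

toy_graph = {1: [2, 3], 2: [4], 3: [4], 4: []}
print(bfs_path(toy_graph, 1, lambda n: n == 4))  # [1, 2, 4]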
def write_data_v1(people, filename): """Writes in-memory data objects about Peers or MPs to an external file.""" csv_file = open(filename, 'wb') writer = csv.writer(csv_file) headings = ['Member_Id', 'Dods_Id', 'Pims_Id', 'DisplayAs', 'ListAs', 'FullTitle', 'LayingMinisterName', 'DateOfBirth', 'DateOfDeath', 'Gender', 'Party', 'House', 'MemberFrom', 'HouseStartDate', 'HouseEndDate', 'CurrentStatus_Id', 'CurrentStatus_IsActive', 'CurrentStatus_Name', 'CurrentStatus_Reason', 'CurrentStatus_StartDate'] for i in range(1, 5): headings.append('Address' + str(i) + '_Type_Id') headings.append('Address' + str(i) + '_Type') headings.append('Address' + str(i) + '_IsPreferred') headings.append('Address' + str(i) + '_IsPhysical') headings.append('Address' + str(i) + '_Note') headings.append('Address' + str(i) + '_Address1') headings.append('Address' + str(i) + '_Address2') headings.append('Address' + str(i) + '_Address3') headings.append('Address' + str(i) + '_Address4') headings.append('Address' + str(i) + '_Address5') headings.append('Address' + str(i) + '_Postcode') headings.append('Address' + str(i) + '_Phone') headings.append('Address' + str(i) + '_Fax') headings.append('Address' + str(i) + '_Email') writer.writerow(headings) for person in people: row = [ person.member_id, person.dobs_id, person.pims_id, person.display_as, person.list_as, person.full_title, person.laying_minister_name, person.date_of_birth, person.date_of_death, person.gender, person.party, person.house, person.member_from, person.house_start_date, person.house_end_date, person.current_status_id, person.current_status_is_active, person.current_status_name, person.current_status_reason, person.current_status_start_date, ] for i in range(0, 4): if len(person.addresses) > i: row.append(person.addresses[i].type_id) row.append(person.addresses[i].type) row.append(person.addresses[i].is_preferred) row.append(person.addresses[i].is_physical) row.append(person.addresses[i].note) row.append(person.addresses[i].address_1) row.append(person.addresses[i].address_2) row.append(person.addresses[i].address_3) row.append(person.addresses[i].address_4) row.append(person.addresses[i].address_5) row.append(person.addresses[i].postcode) row.append(person.addresses[i].phone) row.append(person.addresses[i].fax) row.append(person.addresses[i].email) else: for x in range(1, 14): row.append(None) writer.writerow([(unicode(s).encode("utf-8") if s is not None else '') for s in row]) csv_file.close()
5,347,594
def create_script_run(snapshot_root_directory: Optional[Path] = None, entry_script: Optional[PathOrString] = None, script_params: Optional[List[str]] = None) -> ScriptRunConfig: """ Creates an AzureML ScriptRunConfig object, that holds the information about the snapshot, the entry script, and its arguments. :param entry_script: The script that should be run in AzureML. :param snapshot_root_directory: The directory that contains all code that should be packaged and sent to AzureML. All Python code that the script uses must be copied over. :param script_params: A list of parameter to pass on to the script as it runs in AzureML. If empty (or None, the default) these will be copied over from sys.argv, omitting the --azureml flag. :return: """ if snapshot_root_directory is None: print("No snapshot root directory given. All files in the current working directory will be copied to AzureML.") snapshot_root_directory = Path.cwd() else: print(f"All files in this folder will be copied to AzureML: {snapshot_root_directory}") if entry_script is None: entry_script = Path(sys.argv[0]) print("No entry script given. The current main Python file will be executed in AzureML.") elif isinstance(entry_script, str): entry_script = Path(entry_script) if entry_script.is_absolute(): try: # The entry script always needs to use Linux path separators, even when submitting from Windows entry_script_relative = entry_script.relative_to(snapshot_root_directory).as_posix() except ValueError: raise ValueError("The entry script must be inside of the snapshot root directory. " f"Snapshot root: {snapshot_root_directory}, entry script: {entry_script}") else: entry_script_relative = str(entry_script) script_params = _get_script_params(script_params) print(f"This command will be run in AzureML: {entry_script_relative} {' '.join(script_params)}") return ScriptRunConfig( source_directory=str(snapshot_root_directory), script=entry_script_relative, arguments=script_params)
5,347,595
def test_super_tensor_property(): """ Tensor: Super_tensor correctly tensors on underlying spaces. """ U1 = rand_unitary(3) U2 = rand_unitary(5) U = tensor(U1, U2) S_tens = to_super(U) S_supertens = super_tensor(to_super(U1), to_super(U2)) assert_(S_tens == S_supertens) assert_equal(S_supertens.superrep, 'super')
5,347,596
def test_merge_anime_info(): """Test_merge_anime_info.""" expected_data = { 'ageRating': 'R', 'amazon': 'https://www.amazon.com/gp/video/detail/B06VW8K7ZJ/', 'averageRating': '82.69', 'canonicalTitle': 'Cowboy Bebop', 'categories': [ 'science-fiction', 'space', 'drama', 'action', 'space-travel', 'post-apocalypse', 'other-planet', 'future', 'shipboard', 'detective', 'bounty-hunter', 'gunfights', 'adventure', 'comedy', ], 'createdAt': '2018-10-15T01:26:50.783Z', 'crunchyroll': 'http://www.crunchyroll.com/cowboy-bebop', 'endDate': '1999-04-24', 'episodeCount': 26, 'episodeLength': 25, 'favoritesCount': 4380, 'finishedAt': None, 'funimation': 'http://www.funimation.com/shows/43625', 'hulu': 'https://www.hulu.com/cowboy-bebop', 'id': '36121977', 'favoritesCount': 4380, 'finishedAt': None, 'funimation': 'http://www.funimation.com/shows/43625', 'hulu': 'https://www.hulu.com/cowboy-bebop', 'id': '36121977', 'nextRelease': None, 'notes': None, 'popularityRank': 24, 'posterImage': 'https://media.kitsu.io/anime/poster_images/1/original.jpg?1431697256', 'private': False, 'progress': 6, 'progressedAt': '2020-04-03T01:38:56.762Z', 'ratingRank': 73, 'ratingTwenty': None, 'showType': 'TV', 'slug': 'cowboy-bebop', 'startDate': '1998-04-03', 'startedAt': '2020-01-04T15:02:36.103Z', 'status': 'finished', 'subtype': None, 'synopsis': ('In the year 2071, humanity has colonized several of the planets ' 'and moons of the solar system leaving the now uninhabitable ' 'surface of planet Earth behind. The Inter Solar System Police ' 'attempts to keep peace in the galaxy, aided in part by outlaw ' 'bounty hunters, referred to as "Cowboys". The ragtag team aboard ' 'the spaceship Bebop are two such individuals. Mellow and ' 'carefree Spike Spiegel is balanced by his boisterous, pragmatic ' 'partner Jet Black as the pair makes a living chasing bounties ' 'and collecting rewards. Thrown off course by the addition of new ' 'members that they meet in their travels—Ein, a genetically ' 'engineered, highly intelligent Welsh Corgi; femme fatale Faye ' 'Valentine, an enigmatic trickster with memory loss; and the ' 'strange computer whiz kid Edward Wong—the crew embarks on ' "thrilling adventures that unravel each member's dark and " 'mysterious past little by little. Well-balanced with high ' 'density action and light-hearted comedy, Cowboy Bebop is a space ' 'Western classic and an homage to the smooth and improvised music ' 'it is named after. [Written by MAL Rewrite]'), 'totalLength': 626, 'tubitv': 'http://tubitv.com/series/2052/cowboy_bebop', 'updatedAt': '2020-04-03T01:38:56.763Z', 'userCount': 106156, 'watch_status': 'dropped', } anime_entry_data = LIB_ENTRY['data'][0] data = merge_anime_info(anime_entry_data, ANIME, STREAMS) # act assert data == expected_data
5,347,597
def M_to_E(M, ecc): """Eccentric anomaly from mean anomaly. .. versionadded:: 0.4.0 Parameters ---------- M : float Mean anomaly (rad). ecc : float Eccentricity. Returns ------- E : float Eccentric anomaly. """ with u.set_enabled_equivalencies(u.dimensionless_angles()): E = optimize.newton(_kepler_equation, M, _kepler_equation_prime, args=(M, ecc)) return E
5,347,598
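The solver above wraps Kepler's equation M = E - e*sin(E) in astropy units plus the project's _kepler_equation helpers; the same root-finding step can be sketched directly with scipy (illustrative values only):

import numpy as np
from scipy import optimize

ecc = 0.3
M = 1.0  # mean anomaly in radians
E = optimize.newton(lambda x: x - ecc * np.sin(x) - M, M)
print(E, E - ecc * np.sin(E))  # the second value should recover M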
def load_beijing(): """Load and return the Beijing air quality dataset.""" module_path = os.path.dirname(__file__) data = pd.read_csv( os.path.join(module_path, 'data', 'beijing_air_quality.csv')) return data
5,347,599