content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def swig_py_object_2_list_int(object, size: int) -> List[int]:
    """Converts a SwigPyObject of floats to a List[int]."""
    y = (ctypes.c_float * size).from_address(int(object))
    new_object = []
    for i in range(size):
        new_object += [int(y[i])]
    return new_object
5,351,200
def ParseAttributesFromData(attributes_data, expected_param_names):
    """Parses a list of ResourceParameterAttributeConfig from yaml data.

    Args:
        attributes_data: dict, the attributes data defined in
            command_lib/resources.yaml file.
        expected_param_names: [str], the names of the API parameters that the
            API method accepts. Example, ['projectsId', 'instancesId'].

    Returns:
        [ResourceParameterAttributeConfig].

    Raises:
        InvalidResourceArgumentLists: if the attributes defined in the yaml
            file don't match the expected fields in the API method.
    """
    raw_attributes = [
        ResourceParameterAttributeConfig.FromData(a) for a in attributes_data
    ]
    registered_param_names = [a.parameter_name for a in raw_attributes]
    final_attributes = []

    # TODO(b/78851830): improve the time complexity here.
    for expected_name in expected_param_names:
        if raw_attributes and expected_name == raw_attributes[0].parameter_name:
            # Attribute matches expected, add it and continue checking.
            final_attributes.append(raw_attributes.pop(0))
        elif expected_name in IGNORED_FIELDS:
            # Attribute doesn't match but is being ignored. Add an auto-generated
            # attribute as a substitute.
            # Currently, it would only be the project config.
            attribute_name = IGNORED_FIELDS[expected_name]
            ignored_attribute = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name)
            # Manually add the parameter name, e.g. project, projectId or projectsId.
            ignored_attribute.parameter_name = expected_name
            final_attributes.append(ignored_attribute)
        else:
            # It doesn't match (or there are no more registered params) and the
            # field is not being ignored, error.
            raise InvalidResourceArgumentLists(expected_param_names,
                                               registered_param_names)

    if raw_attributes:
        # All expected fields were processed but there are still registered
        # attribute params remaining, they must be extra.
        raise InvalidResourceArgumentLists(expected_param_names,
                                           registered_param_names)

    return final_attributes
5,351,201
async def reactionFromRaw(payload: RawReactionActionEvent) -> Tuple[Message, Union[User, Member], emojis.BasedEmoji]:
    """Retrieve complete Reaction and user info from a RawReactionActionEvent payload.

    :param RawReactionActionEvent payload: Payload describing the reaction action
    :return: The message whose reactions changed, the user who completed the action, and the emoji that changed.
    :rtype: Tuple[Message, Union[User, Member], BasedEmoji]
    """
    emoji = None
    user = None
    message = None

    if payload.member is None:
        # Get the channel containing the reacted message
        if payload.guild_id is None:
            channel = botState.client.get_channel(payload.channel_id)
        else:
            guild = botState.client.get_guild(payload.guild_id)
            if guild is None:
                return None, None, None
            channel = guild.get_channel(payload.channel_id)

        # Individual handling for each channel type for efficiency
        if isinstance(channel, DMChannel):
            if channel.recipient.id == payload.user_id:
                user = channel.recipient
            else:
                user = channel.me
        elif isinstance(channel, GroupChannel):
            # Group channels should be small and far between, so iteration is fine here.
            for currentUser in channel.recipients:
                if currentUser.id == payload.user_id:
                    user = currentUser
            if user is None:
                user = channel.me
        # Guild text channels
        elif isinstance(channel, TextChannel):
            user = channel.guild.get_member(payload.user_id)
        else:
            return None, None, None

        # Fetch the reacted message (api call)
        message = await channel.fetch_message(payload.message_id)

    # If a reacting member was given, the guild can be inferred from the member.
    else:
        user = payload.member
        message = await payload.member.guild.get_channel(payload.channel_id).fetch_message(payload.message_id)

    if message is None:
        return None, None, None

    # Convert reacted emoji to BasedEmoji
    try:
        emoji = emojis.BasedEmoji.fromPartial(payload.emoji, rejectInvalid=True)
    except exceptions.UnrecognisedCustomEmoji:
        return None, None, None

    return message, user, emoji
5,351,202
def cancelDelayedCalls(expected=2):
    """
    :param expected: The number of calls to cancel. If the number found does
        not match this, none of them will be cancelled so that trial's cleanup
        can tell you more about them.

        Why the default of 2? Hopefully you're only testing one delayed call
        generator at a time, and there's one for trial's 2 minute timeout.
    """
    calls = reactor.getDelayedCalls()
    strings = [str(call) for call in calls]
    if len(calls) != expected:
        raise AssertionError(
            '\n\nExpected {} delayed calls, found {}: {}'.format(
                expected, len(calls), strings
            ))
    for call in calls:
        call.cancel()
5,351,203
def poisson2d(N, dtype='d', format=None):
    """
    Return a sparse matrix for the 2d Poisson problem
    with standard 5-point finite difference stencil on a
    square N-by-N grid.
    """
    if N == 1:
        diags = asarray([[4]], dtype=dtype)
        return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)

    offsets = array([0, -N, N, -1, 1])

    diags = empty((5, N**2), dtype=dtype)
    diags[0] = 4            # main diagonal
    diags[1:] = -1          # all off-diagonals
    diags[3, N-1::N] = 0    # first lower diagonal
    diags[4, N::N] = 0      # first upper diagonal

    return dia_matrix((diags, offsets), shape=(N**2, N**2)).asformat(format)
5,351,204
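A minimal usage sketch for the poisson2d helper above. It assumes the SciPy/NumPy names the function relies on (asarray, array, empty, dia_matrix) are imported as shown, matching how the original module would use them.

import numpy as np
from numpy import asarray, array, empty
from scipy.sparse import dia_matrix

# Build the 5-point Laplacian for a 3x3 grid and inspect its first row.
A = poisson2d(3, format='csr')
print(A.shape)          # (9, 9)
print(A.toarray()[0])   # [ 4. -1.  0. -1.  0.  0.  0.  0.  0.]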
def qa():
    """Works on qa environment"""
    global c
    c.env = 'qa'
5,351,205
def get_factory():
    """Randomly pick and instantiate one of the course factory classes."""
    return random.choice([BasicCourseFactory, ProjectCourseFactory])()
5,351,206
def delay_class_factory(motor_class):
    """
    Create a subclass of DelayBase that controls a motor of class motor_class.

    Used in delay_instance_factory (DelayMotor), may be useful for one-line
    declarations inside ophyd Devices.
    """
    try:
        cls = delay_classes[motor_class]
    except KeyError:
        cls = type(
            'Delay' + motor_class.__name__,
            (DelayBase,),
            {'motor': Cpt(motor_class, '')}
        )
        delay_classes[motor_class] = cls
    return cls
5,351,207
def check_and_makedir(folder_name):
    """
    Check whether a directory exists and create it if it does not.

    Returns False if the directory had to be created, True if it already existed.
    """
    if not os.path.isdir(folder_name):
        os.mkdir(folder_name)
        return False
    else:
        return True
5,351,208
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
    """Recursively collects all files in directory and
    subdirectories of specified path.

    Args:
        dir_path: str. Path to the folder to be linted.
        excluded_glob_patterns: set(str). Set of all glob patterns
            to be excluded.

    Returns:
        a list of files in directory and subdirectories without excluded files.
    """
    files_in_directory = []
    for _dir, _, files in os.walk(dir_path):
        for file_name in files:
            filepath = os.path.relpath(
                os.path.join(_dir, file_name), os.getcwd())
            if not any([fnmatch.fnmatch(filepath, gp) for gp in
                        excluded_glob_patterns]):
                files_in_directory.append(filepath)
    return files_in_directory
5,351,209
def lonlat2px_gt(img, lon, lat, lon_min, lat_min, lon_max, lat_max):
    """
    Converts a pair of lon and lat to its corresponding pixel
    value in a GeoTIFF image file.

    Parameters
    ----------
    img : Image File, e.g. PNG, TIFF
        Input image file
    lon : float
        Longitude
    lat : float
        Latitude
    lon_min, lat_min : float
        lower left coordinate of geotiff
    lon_max, lat_max : float
        upper right coordinate of geotiff

    Returns
    -------
    row : float
        corresponding pixel value
    col : float
        corresponding pixel value
    """
    w, h = img.size

    londiff = lon_max - lon_min
    latdiff = lat_max - lat_min

    mw = w / londiff
    mh = h / latdiff

    row = (-lat + lat_max) * mh
    col = (lon - lon_min) * mw

    return row, col
5,351,210
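A small usage sketch for lonlat2px_gt above. The image size and bounding box below are made-up values for illustration; any object with a PIL-style .size attribute works.

from PIL import Image

# Hypothetical 100x200 (width x height) image covering lon 10..20, lat 40..50.
img = Image.new("L", (100, 200))
row, col = lonlat2px_gt(img, lon=15.0, lat=45.0,
                        lon_min=10.0, lat_min=40.0,
                        lon_max=20.0, lat_max=50.0)
print(row, col)  # 100.0 50.0 -- halfway down and halfway across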
def patchwise_contrastive_metric(image_sequence: torch.Tensor,
                                 kpt_sequence: torch.Tensor,
                                 method: str = 'norm',
                                 time_window: int = 3,
                                 patch_size: tuple = (7, 7),
                                 alpha: float = 0.1):
    """ Contrasts pixel patches around key-points.
        Positive examples are drawn from the same key-point at time-steps in the given time-window.
        Negative examples are drawn from other key-points at any time-step
        or the same key-point outside of the time-window.

    :param image_sequence: Tensor of sequential images in (N, T, C, H, W)
    :param kpt_sequence: Tensor of key-point coordinates in (N, T, K, D)
    :param method: Method to use:
        'mean': Compares the mean patch differences
        'norm': Compares the image norm of the patch differences
        'vssil': Uses the pixelwise-contrastive feature representations
        'tfeat': Uses tfeat encodings to compare the image patches
    :param time_window: Window size of positive examples around the current time-step
        E.g. time_window=3 uses t-1 and t+1 as positives for t
        At t=0 and t=T, the window size is reduced.
    :param patch_size: Size of the patch to extract from the input, around the key-point
        If these would extend the image borders, they are moved to within the borders.
        TODO: Fix with padding instead ?
    :param alpha: Allowance for pos / neg similarity
    """
    N, T, C, H, W = image_sequence.shape
    assert kpt_sequence.shape[0] == N, "images and kpts dont share batch size dim"
    assert kpt_sequence.shape[1] == T, "images and kpts dont share time dim"
    _, _, K, D = kpt_sequence.shape

    # To reduce the computational effort, the extracted patches are saved and re-used by demand
    patch_sequence = torch.empty(size=(N, T, K, C, patch_size[0], patch_size[1]))
    evaluated_kpts = []

    L = torch.empty(size=(N, T, K)).to(kpt_sequence.device)

    # Iterate over time-steps
    for t in range(T):
        # Iterate over key-points
        for k in range(K):

            #
            # ANCHOR
            #
            if (t, k) in evaluated_kpts:
                anchor_patch = patch_sequence[:, t, k, ...].float()
            else:
                x_min, x_max, y_min, y_max = get_box_within_image_border(kpt_sequence, patch_size, H, W, t, k)
                anchor_patch = image_sequence[:, t, :, x_min: x_max + 1, y_min: y_max + 1].float()
                patch_sequence[:, t, k, ...] = anchor_patch
                evaluated_kpts.append((t, k))

            #
            # POSITIVES
            #
            L_pos = torch.tensor([0]).to(kpt_sequence.device)
            t_range = np.arange(max(0, t - int(time_window / 2)), min(T - 1, t + int(time_window / 2)) + 1)
            # t_range = np.arange(0, T)
            for t_p in t_range:
                if t_p == t:
                    continue
                if (t_p, k) in evaluated_kpts:
                    positive_patch = patch_sequence[:, t_p, k, ...].float()
                else:
                    x_min, x_max, y_min, y_max = get_box_within_image_border(kpt_sequence, patch_size, H, W, t_p, k)
                    positive_patch = image_sequence[:, t_p, :, x_min: x_max + 1, y_min: y_max + 1].float()
                    patch_sequence[:, t_p, k, ...] = positive_patch
                    evaluated_kpts.append((t_p, k))
                L_pos = L_pos + torch.norm(positive_patch - anchor_patch, p=2)
                L_pos = L_pos + torch.norm(kpt_sequence[:, t, k, :] - kpt_sequence[:, t_p, k, :], p=2)
            L_pos = (L_pos / (len(t_range) - 1)) if len(t_range) > 2 else L_pos

            #
            # NEGATIVES
            #
            L_neg = torch.tensor([0]).to(kpt_sequence.device)
            # for t_n in range(0, T):
            for t_n in t_range:
                for k_n in range(0, K):
                    if (t_n in t_range or t_n == t) and k_n == k:
                        continue
                    else:
                        if (t_n, k_n) in evaluated_kpts:
                            negative_patch = patch_sequence[:, t_n, k_n].float()
                        else:
                            x_min, x_max, y_min, y_max = get_box_within_image_border(kpt_sequence, patch_size, H, W, t_n, k_n)
                            negative_patch = image_sequence[:, t_n, :, x_min:x_max + 1, y_min:y_max + 1].float()
                            patch_sequence[:, t_n, k_n, ...] = negative_patch
                            evaluated_kpts.append((t_n, k_n))
                        L_neg = L_neg + torch.norm(negative_patch - anchor_patch, p=2)
                        L_neg = L_neg + torch.norm(kpt_sequence[:, t, k, :] - kpt_sequence[:, t_n, k_n, :], p=2)
            L_neg = L_neg / (T * (K - 1) + T - len(t_range) + 1)
            print(f't: {t} k: {k} = ', max(L_pos - L_neg + alpha, torch.tensor([0.0])).mean().item())
            L[:, t, k] = max(L_pos - L_neg + alpha, torch.tensor([0.0]))
    return torch.mean(L, dim=[0, 2])
5,351,211
def array_wishart_rvs(df, scale, **kwargs):
    """ Wrapper around scipy.stats.wishart to always return a np.array """
    if np.size(scale) == 1:
        return np.array([[
            scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
        ]])
    else:
        return scipy.stats.wishart(df=df, scale=scale, **kwargs).rvs()
5,351,212
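A quick usage sketch for array_wishart_rvs above. The drawn values are random; only the output shapes are deterministic.

import numpy as np
import scipy.stats

# Scalar scale: the draw is wrapped into a 1x1 array.
sample = array_wishart_rvs(df=3, scale=2.0)
print(sample.shape)  # (1, 1)

# Matrix scale: the draw already comes back as a 2x2 array.
sample = array_wishart_rvs(df=3, scale=np.eye(2))
print(sample.shape)  # (2, 2)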
def get_mpl_colors():
    """
    ==================
    Colormap reference
    ==================

    Reference for colormaps included with Matplotlib.

    This reference example shows all colormaps included with Matplotlib. Note
    that any colormap listed here can be reversed by appending "_r" (e.g.,
    "pink_r"). These colormaps are divided into the following categories:

    Sequential:
        These colormaps are approximately monochromatic colormaps varying
        smoothly between two color tones---usually from low saturation (e.g.
        white) to high saturation (e.g. a bright blue). Sequential colormaps
        are ideal for representing most scientific data since they show a
        clear progression from low-to-high values.

    Diverging:
        These colormaps have a median value (usually light in color) and vary
        smoothly to two different color tones at high and low values.
        Diverging colormaps are ideal when your data has a median value that
        is significant (e.g. 0, such that positive and negative values are
        represented by different colors of the colormap).

    Qualitative:
        These colormaps vary rapidly in color. Qualitative colormaps are
        useful for choosing a set of discrete colors. For example::

            color_list = plt.cm.Set3(np.linspace(0, 1, 12))

        gives a list of RGB colors that are good for plotting a series of
        lines on a dark background.

    Miscellaneous:
        Colormaps that don't fit into the categories above.
    """
    # Have colormaps separated into categories:
    # http://matplotlib.org/examples/color/colormaps_reference.html
    return [('Perceptually Uniform Sequential', [
                'viridis', 'plasma', 'inferno', 'magma']),
            ('Sequential', [
                'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
                'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
                'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
            ('Sequential (2)', [
                'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
                'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
                'hot', 'afmhot', 'gist_heat', 'copper']),
            ('Diverging', [
                'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
                'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
            ('Qualitative', [
                'Pastel1', 'Pastel2', 'Paired', 'Accent',
                'Dark2', 'Set1', 'Set2', 'Set3',
                'tab10', 'tab20', 'tab20b', 'tab20c']),
            ('Miscellaneous', [
                'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
                'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
                'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
5,351,213
def list_sequences(bam):
    """
    List the sequences involved and whether they are forward or reverse
    :param bam: the bam object from pysam
    :type bam: pysam.AlignmentFile
    :return:
    :rtype:
    """
    for template in locations:
        for primer in locations[template]:
            start, end = locations[template][primer]
            print("\nALIGNMENT: {} FROM {} TO {}\n".format(primer, start, end))
            for read in bam.fetch(reference=template, start=start, end=end):
                print("{}\t{}\t{}".format(primer, read.query_name, read.is_reverse))
5,351,214
async def async_setup_entry(hass, entry):
    """Set up Jenkins from a config entry."""
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )

    return True
5,351,215
def signal_to_dataset(raw, fsamp, intvs, labels):
    """Segment raw data into a list of epochs.

    Returns dataset and label_array: a list of data, each block is 1 second,
    with fixed size. Width is the number of channels in a certain standard order.

    Args:
        raw: EEG signals. Shape: (n_channel, n_sample).
        fsamp(int): sampling rate, i.e., window size of resulting epoch. Unit: Hz
        intvs: list of [start, end]. Unit: second
        labels: list of labels. Must be the same length as intvs.

    Returns:
        tuple (dataset, labels):
        - dataset: list of data; (n_epochs, n_channels, n_sample_per_epoch)
        - labels: list of labels
    """
    ds, lbl = [], []
    for i, inv in enumerate(intvs):
        tstart, tend = inv
        chopped_sig = chop_signal(
            [ch[math.ceil(tstart * fsamp):math.floor(tend * fsamp)] for ch in raw],
            fsamp)
        ds.extend(chopped_sig)
        lbl.extend([labels[i]] * len(chopped_sig))
    return ds, lbl
5,351,216
def project_dynamic_property_graph(graph, v_prop, e_prop, v_prop_type, e_prop_type):
    """Create project graph operation for nx graph.

    Args:
        graph (:class:`nx.Graph`): A nx graph.
        v_prop (str): The node attribute key to project.
        e_prop (str): The edge attribute key to project.
        v_prop_type (str): Type of the node attribute.
        e_prop_type (str): Type of the edge attribute.

    Returns:
        Operation to project a dynamic property graph. Results in a simple graph.
    """
    check_argument(graph.graph_type == types_pb2.DYNAMIC_PROPERTY)
    config = {
        types_pb2.GRAPH_NAME: utils.s_to_attr(graph.key),
        types_pb2.GRAPH_TYPE: utils.graph_type_to_attr(types_pb2.DYNAMIC_PROJECTED),
        types_pb2.V_PROP_KEY: utils.s_to_attr(v_prop),
        types_pb2.E_PROP_KEY: utils.s_to_attr(e_prop),
        types_pb2.V_DATA_TYPE: utils.s_to_attr(utils.data_type_to_cpp(v_prop_type)),
        types_pb2.E_DATA_TYPE: utils.s_to_attr(utils.data_type_to_cpp(e_prop_type)),
    }

    op = Operation(
        graph._session_id,
        types_pb2.PROJECT_GRAPH,
        config=config,
        output_types=types_pb2.GRAPH,
    )
    return op
5,351,217
def show__machines(args):
    """
    Show the machines user is offering for rent.

    :param argparse.Namespace args: should supply all the command-line options
    :rtype:
    """
    req_url = apiurl(args, "/machines", {"owner": "me"})
    r = requests.get(req_url)
    r.raise_for_status()
    rows = r.json()["machines"]
    if args.raw:
        print(json.dumps(rows, indent=1, sort_keys=True))
    else:
        for machine in rows:
            if args.quiet:
                print("{id}".format(id=machine["id"]))
            else:
                print("{N} machines: ".format(N=len(rows)))
                print(
                    "{id}: {json}".format(
                        id=machine["id"],
                        json=json.dumps(machine, indent=4, sort_keys=True),
                    )
                )
5,351,218
def eqfm_(a, b):
    """Helper for comparing floats AND style names."""
    n1, v1 = a
    n2, v2 = b
    if type(v1) is not float:
        return eq_(a, b)
    eqf_(v1, v2)
    eq_(n1, n2)
5,351,219
def play_env_problem_randomly(env_problem, num_steps):
    """Plays the env problem by randomly sampling actions for `num_steps`."""
    # Reset all environments.
    env_problem.reset()

    # Play all environments, sampling random actions each time.
    for _ in range(num_steps):
        # Sample batch_size actions from the action space and stack them.
        actions = np.stack([
            env_problem.action_space.sample()
            for _ in range(env_problem.batch_size)
        ])

        # Execute actions, observations are stored in `env_problem`.
        _, _, dones, _ = env_problem.step(actions)

        # Get the indices where we are done and reset those.
        env_problem.reset(indices=done_indices(dones))
5,351,220
def flatten_comment(seq):
    """Flatten a sequence of comment tokens to a human-readable string."""
    # Example input:
    # [CommentToken(value='# Extra settings placed in ``[app:main]`` section in generated production.ini.\n'),
    #  CommentToken(value='# Example:\n'), CommentToken(value='#\n'),
    #  CommentToken(value='# extra_ini_settings: |\n'),
    #  CommentToken(value='# mail.host = mymailserver.internal\n'),
    #  CommentToken(value='# websauna.superusers =\n'),
    #  CommentToken(value='# [email protected]\n'), CommentToken(value='#\n')]
    if not seq:
        return ""

    result = []
    for item in seq:
        if not item:
            continue

        if isinstance(item, CommentToken):
            # Mangle away # comment start from the line
            s = item.value
            s = s.strip(" ")
            s = s.lstrip("#")
            s = s.rstrip("\n")
            if s.startswith(" "):
                s = s[1:]
            result.append(s)

    if result:
        raw_comment = "\n".join(result)
    else:
        return ""

    section_header = raw_comment.rfind("---")
    if section_header >= 0:
        raw_comment = raw_comment[section_header + 3:]

    return raw_comment
5,351,221
def test_countstore_reset_dates():
    """Dates attrs are all set to None"""
    c = CountStore()
    c.start = 42
    c.start_date = now()
    c.end = 52
    c.end_date = now()
    c.reset()

    assert c.start is None
    assert c.start_date is None
    assert c.end is None
    assert c.end_date is None
5,351,222
def get_community(community_id):
    """
    Verify that a community with a given id exists.

    :param community_id: id of test community
    :return: Community instance, or None if it does not exist
        (so the caller can respond with a 404 error)
    """
    try:
        return Community.objects.get(pk=community_id)
    except Community.DoesNotExist:
        return
5,351,223
def load_labeled_info(csv4megan_excell, audio_dataset, ignore_files=None):
    """Read labeled info from the spreadsheet and remove samples with no
    audio file, as well as files given in ignore_files.
    """
    if ignore_files is None:
        ignore_files = set()

    with open(csv4megan_excell) as csvfile:
        reader = csv.DictReader(csvfile)
        reader = list(reader)

    reader_strip = []
    for row in reader:
        row = {r: row[r].strip() for r in row}
        reader_strip.append(row)
    reader = reader_strip.copy()

    missing_audio_files = []
    for row in reader:
        if audio_dataset.get(row['File Name'], None) is None:
            missing_audio_files.append(row['File Name'])
    missing_audio_files = set(missing_audio_files)
    print((f'{len(missing_audio_files)} files are missing'
           + ' corresponding to excell entries'))

    megan_data_sheet = []
    for row in reader:
        if row['File Name'] not in ignore_files:
            if row['File Name'] not in missing_audio_files:
                megan_data_sheet.append(row)

    deleted_files = set()
    deleted_files.update(ignore_files)
    deleted_files.update(missing_audio_files)
    pprint((f'-> {len(deleted_files)} number of samples are DELETED due to '
            + 'ignore_files and missing_audio_files'))

    return megan_data_sheet, list(deleted_files)
5,351,224
def de_dupe_list(input):
    """de-dupe a list, preserving order."""
    sam_fh = []
    for x in input:
        if x not in sam_fh:
            sam_fh.append(x)
    return sam_fh
5,351,225
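A short usage sketch for de_dupe_list above; it keeps the first occurrence of each element in order.

print(de_dupe_list([3, 1, 3, 2, 1]))  # [3, 1, 2]
print(de_dupe_list("abracadabra"))    # ['a', 'b', 'r', 'c', 'd']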
def donwload_l10ns():
    """Download all l10ns in zip archive."""
    url = API_PREFIX + 'download/' + FILENAME + KEY_SUFFIX
    l10ns_file = urllib2.urlopen(url)
    with open('all.zip', 'wb') as f:
        f.write(l10ns_file.read())
    return True
5,351,226
def test_space_translation():
    """Compare code-transformed waveform to analytically transformed waveform"""
    print("")
    ell_max = 8
    for s in range(-2, 2 + 1):
        for ell in range(abs(s), ell_max + 1):
            print("\tWorking on spin s =", s, ", ell =", ell)
            for m in range(-ell, ell + 1):
                for space_translation in [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]:
                    auxiliary_waveforms = {}
                    for i in range(s + 2):
                        auxiliary_waveforms[f"psi{4-i}_modes"] = samples.single_mode_proportional_to_time(s=i - 2)
                        auxiliary_waveforms[f"psi{4-i}_modes"].data *= 0
                    w_m1 = samples.single_mode_proportional_to_time(s=s, ell=ell, m=m).transform(
                        space_translation=space_translation,
                        **auxiliary_waveforms,
                    )
                    w_m2 = samples.single_mode_proportional_to_time_supertranslated(
                        s=s, ell=ell, m=m, space_translation=np.array(space_translation)
                    )
                    i1A = np.argmin(abs(w_m1.t - (w_m1.t[0] + 2 * np.linalg.norm(space_translation))))
                    i1B = np.argmin(abs(w_m1.t - (w_m1.t[-1] - 2 * np.linalg.norm(space_translation))))
                    i2A = np.argmin(abs(w_m2.t - w_m1.t[i1A]))
                    i2B = np.argmin(abs(w_m2.t - w_m1.t[i1B]))
                    assert np.allclose(w_m1.t[i1A : i1B + 1], w_m2.t[i2A : i2B + 1], rtol=0.0, atol=1e-16), (
                        w_m1.t[i1A],
                        w_m2.t[i2A],
                        w_m1.t[i1B],
                        w_m2.t[i2B],
                        w_m1.t[i1A : i1B + 1].shape,
                        w_m2.t[i2A : i2B + 1].shape,
                    )
                    data1 = w_m1.data[i1A : i1B + 1]
                    data2 = w_m2.data[i2A : i2B + 1]
                    assert np.allclose(data1, data2, rtol=0.0, atol=5e-14), (
                        [s, ell, m],
                        space_translation,
                        [
                            abs(data1 - data2).max(),
                            data1.ravel()[np.argmax(abs(data1 - data2))],
                            data2.ravel()[np.argmax(abs(data1 - data2))],
                        ],
                        [
                            np.unravel_index(np.argmax(abs(data1 - data2)), data1.shape)[0],
                            list(
                                sf.LM_range(abs(s), ell_max)[
                                    np.unravel_index(np.argmax(abs(data1 - data2)), data1.shape)[1]
                                ]
                            ),
                        ],
                    )
5,351,227
def _constant_velocity_heading_from_kinematics(kinematics_data: KinematicsData,
                                               sec_from_now: float,
                                               sampled_at: int) -> np.ndarray:
    """
    Computes a constant velocity baseline for given kinematics data, time window
    and frequency.
    :param kinematics_data: KinematicsData for agent.
    :param sec_from_now: How many future seconds to use.
    :param sampled_at: Number of predictions to make per second.
    """
    x, y, vx, vy, _, _, _, _, _, _ = kinematics_data
    preds = []
    time_step = 1.0 / sampled_at
    for time in np.arange(time_step, sec_from_now + time_step, time_step):
        preds.append((x + time * vx, y + time * vy))
    return np.array(preds)
5,351,228
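A small usage sketch for the constant-velocity baseline above. Treating KinematicsData as a plain 10-tuple (x, y, vx, vy followed by six unused fields) is an assumption made only for illustration.

import numpy as np

# Agent at (0, 0) moving with velocity (1, 2) m/s; predict 2 s ahead at 2 Hz.
kinematics = (0.0, 0.0, 1.0, 2.0, 0, 0, 0, 0, 0, 0)
preds = _constant_velocity_heading_from_kinematics(kinematics, sec_from_now=2.0, sampled_at=2)
print(preds)
# [[0.5 1. ]
#  [1.  2. ]
#  [1.5 3. ]
#  [2.  4. ]]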
def arg_int(name, default=None):
    """Fetch a query argument, as an integer."""
    try:
        v = request.args.get(name)
        return int(v)
    except (ValueError, TypeError):
        return default
5,351,229
def db(app, request):
    """Session-wide test database."""
    if os.path.exists(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite')):
        os.unlink(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite'))

    def teardown():
        _db.drop_all()
        os.unlink(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite'))

    _db.app = app
    apply_migrations(app)

    request.addfinalizer(teardown)
    return _db
5,351,230
def adapter_rest(request, api_module_rest, api_client_rest):
    """Pass."""
    return {
        "adapter": request.param,
        "api_module": api_module_rest,
        "api_client": api_client_rest,
    }
5,351,231
def extract(lon, lat, dep, prop=['rho', 'vp', 'vs'], **kwargs):
    """
    Simple CVM-S extraction

    lon, lat, dep: Coordinate arrays
    prop: 'rho', 'vp', or 'vs'
    nproc: Optional, number of processes

    Returns: (rho, vp, vs) material arrays
    """
    lon = numpy.asarray(lon, 'f')
    lat = numpy.asarray(lat, 'f')
    dep = numpy.asarray(dep, 'f')
    shape = dep.shape
    nsample = dep.size
    cwd = os.getcwd()
    if os.path.exists('cvms-tmp'):
        shutil.rmtree('cvms-tmp')
    os.mkdir('cvms-tmp')
    os.chdir('cvms-tmp')
    cfg = configure(**kwargs)
    lon.tofile(cfg['file_lon'])
    lat.tofile(cfg['file_lat'])
    dep.tofile(cfg['file_dep'])
    del(lon, lat, dep)
    run(nsample=nsample, **kwargs)
    out = []
    if type(prop) not in [list, tuple]:
        prop = [prop]
    for v in prop:
        f = cfg['file_' + v.lower()]
        out += [numpy.fromfile(f, 'f').reshape(shape)]
    os.chdir(cwd)
    shutil.rmtree('cvms-tmp')
    return out
5,351,232
def test_meridian_arc(lat1, lat2, arclen):
    """
    meridianarc(deg2rad(40), deg2rad(80), wgs84Ellipsoid)
    """
    assert lox.meridian_arc(lat1, lat2) == approx(arclen)
5,351,233
def remove_quotes(string):
    """Function to remove quotation marks surrounding a string"""
    string = string.strip()
    while len(string) >= 3 and string.startswith('\'') and string.endswith('\''):
        string = string[1:-1]
        string = quick_clean(string)
    string = quick_clean(string)
    return string
5,351,234
def create_chain_widget(rig, bone_name, radius=0.5, invert=False, bone_transform_name=None):
    """Creates a basic chain widget
    """
    obj = create_widget(rig, bone_name, bone_transform_name)
    if obj != None:
        r = radius
        rh = radius / 2
        if invert:
            verts = [(rh, rh, rh), (r, -r, r), (-r, -r, r), (-rh, rh, rh),
                     (rh, rh, -rh), (r, -r, -r), (-r, -r, -r), (-rh, rh, -rh)]
        else:
            verts = [(r, r, r), (rh, -rh, rh), (-rh, -rh, rh), (-r, r, r),
                     (r, r, -r), (rh, -rh, -rh), (-rh, -rh, -rh), (-r, r, -r)]
        edges = [(0, 1), (1, 2), (2, 3), (3, 0), (4, 5), (5, 6), (6, 7), (7, 4),
                 (0, 4), (1, 5), (2, 6), (3, 7)]
        mesh = obj.data
        mesh.from_pydata(verts, edges, [])
        mesh.update()
5,351,235
def compute_list_featuretypes(
    data,
    list_featuretypes,
    fourier_n_largest_frequencies,
    wavelet_depth,
    mother_wavelet,
):
    """
    This function lets the user choose which combination of features they
    want to have computed.

    list_featuretypes:
        "Basic" - min, max, mean, kurt, skew, std, sum.
        "FourierComplete" - all frequencies amplitudes and phases.
        "FourierNLargest" - n largest frequencies and their values.
        "WaveletComplete" - all approximation and details coefficients at each depth.
        "WaveletBasic" - takes "Basic" (min, max, etc) at each depth.

    Args:
        data (pd.DataFrame()) : one column from which to make features.
        list_featuretypes (list) : list of feature types to be computed.
        fourier_n_largest_frequencies (int) : amount of fourier features.
        wavelet_depth (int) : level of depth up to which the wavelet is computed.
        mother_wavelet (str) : type of wavelet used for the analysis.

    Returns:
        features (pd.DataFrame()) : row of features.
    """
    if type(list_featuretypes) != list:
        raise AttributeError("'list_featuretypes' must be a list.")

    allowed_components = ["Basic", "FourierNLargest", "WaveletComplete",
                          "WaveletBasic", "FourierComplete"]
    for argument in list_featuretypes:
        if argument not in allowed_components:
            raise ValueError(f"argument must be one of {allowed_components}")

    features_basic = pd.DataFrame()
    features_fourier = pd.DataFrame()
    features_wavelet = pd.DataFrame()
    features_wavelet_basic = pd.DataFrame()
    features_fft2 = pd.DataFrame()

    if "Basic" in list_featuretypes:
        features_basic = compute_basic(data)
    if "FourierNLargest" in list_featuretypes:
        features_fourier = compute_fourier_n_largest(data, fourier_n_largest_frequencies)
    if "FourierComplete" in list_featuretypes:
        features_fft2 = compute_fourier_complete(data)
    if "WaveletComplete" in list_featuretypes:
        features_wavelet = compute_wavelet_complete(data, wavelet_depth, mother_wavelet)
    if "WaveletBasic" in list_featuretypes:
        features_wavelet_basic = compute_wavelet_basic(
            data, wavelet_depth, mother_wavelet
        )

    features = pd.concat(
        [features_basic, features_fourier, features_fft2,
         features_wavelet, features_wavelet_basic],
        axis=1,
    )
    return features
5,351,236
def select(locator):
    """
    Returns an :class:`Expression` for finding selects matching the given locator.

    The query will match selects that meet at least one of the following criteria:

    * the element ``id`` exactly matches the locator
    * the element ``name`` exactly matches the locator
    * the element ``id`` exactly matches the ``for`` attribute of a corresponding
      ``label`` element whose text matches the locator
    * the element is nested within a ``label`` element whose text matches the locator

    Args:
        locator (str): A string that identifies the desired selects.

    Returns:
        Expression: An :class:`Expression` object matching the desired selects.
    """
    field_expr = x.descendant("select")
    return _locate_field(field_expr, locator)
5,351,237
def send_image(filename):
    """Route to uploaded-by-client images

    Returns
    -------
    file
        Image file on the server (see Flask documentation)
    """
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
5,351,238
def update(key, view, errors):
    """
    Update updates the set of shown errors. The method shows a given list of
    errors on the given view, keyed by `key`; each tool uses a different key.
    """
    set = PhantomSet(view, key)
    all = []
    for err in errors:
        if buffer.filename(view) == err.file:
            all.append(err.to_phantom(view))
    set.update(all)
    state[id(key, view.id())] = set
5,351,239
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings. Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    with get_cursor() as cursor:
        cursor.execute("SELECT * FROM pairings")
        pairings = cursor.fetchall()
    return pairings
5,351,240
def split(data, batch):
    """
    PyG util code to create graph batches
    """
    node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
    node_slice = torch.cat([torch.tensor([0]), node_slice])

    row, _ = data.edge_index
    edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
    edge_slice = torch.cat([torch.tensor([0]), edge_slice])

    # Edge indices should start at zero for every graph.
    data.edge_index -= node_slice[batch[row]].unsqueeze(0)
    data.__num_nodes__ = torch.bincount(batch).tolist()

    slices = {'edge_index': edge_slice}
    if data.x is not None:
        slices['x'] = node_slice
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_slice
    if data.y is not None:
        if data.y.size(0) == batch.size(0):
            slices['y'] = node_slice
        else:
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)

    return data, slices
5,351,241
def test_add_asset_conflict_error(cli, init_assets):
    """Tests the method ``add_asset`` of the class ``InventoryClient`` with an
    already existing asset."""
    asset_id = init_assets[2].asset_id
    asset = Asset(asset_id)
    timestamp = datetime.fromisoformat('2022-01-01T01:00:00+00:00')
    expiration = datetime.fromisoformat('2022-01-07T01:00:00+00:00')
    with pytest.raises(ConflictError, match=f'.*{asset_id}.*') as exc_info:
        cli.add_asset(asset, expiration, timestamp)
    assert exc_info.value.name == asset_id
    assert compare_unsorted_list(cli.assets(), init_assets, lambda x: x.vid)
5,351,242
def add_drop_subcommand(subparser_factory):
    """Add a subparser for the drop subcommand."""
    drop_help = "Drop a database from the test server."
    subparser = subparser_factory.add_parser('drop', help=drop_help,
                                             description=drop_help)

    dbname_help = "The name of the database to drop."
    subparser.add_argument('dbname', help=dbname_help)

    subparser.set_defaults(subcommand=run_drop_subcommand)
5,351,243
def test_value_repr():
    """Test string representations for archive values."""
    value = SingleVersionValue(
        value=1,
        timestamp=Timestamp(intervals=[TimeInterval(start=1, end=3)])
    )
    assert str(value) == '(1 [[1, 3]])'
    value = MultiVersionValue([
        SingleVersionValue(
            value=1,
            timestamp=Timestamp(intervals=[TimeInterval(start=2, end=3)])
        ),
        SingleVersionValue(
            value=2,
            timestamp=Timestamp(intervals=[TimeInterval(start=4, end=5)])
        )
    ])
    assert str(value) == '((1 [[2, 3]]), (2 [[4, 5]]))'
5,351,244
def test_withdrawal_request_status(mocker, response, uclient) -> None:
    """Test the test_withdrawal_request_status method of the sync client"""
    url = f'{LunoSyncClient.BASE_URI}withdrawals/1234'
    data = {}

    mocker.patch('requests.Session.request', return_value=response)
    response = uclient.withdrawal_request_status(withdrawal_id='1234')

    message = (f"expected response {data}, received {response}")
    assert data == response, message
5,351,245
def _get_shadowprice_data(scenario_id):
    """Gets data necessary for plotting shadow price

    :param str/int scenario_id: scenario id
    :return: (*tuple*) -- interconnect as a str, bus data as a data frame,
        lmp data as a data frame, branch data as a data frame and congestion
        data as a data frame
    """
    s = Scenario(scenario_id)

    interconnect = s.info["interconnect"]
    interconnect = " ".join(interconnect.split("_"))

    s_grid = s.state.get_grid()

    # Get bus and add location data
    bus_map = project_bus(s_grid.bus)

    # get branch and add location data
    branch_map = project_branch(s_grid.branch)

    # get congestion
    congu = s.state.get_congu()
    congl = s.state.get_congl()
    cong_abs = pd.DataFrame(
        np.maximum(congu.to_numpy(), congl.to_numpy()),
        columns=congu.columns,
        index=congu.index,
    )

    return interconnect, bus_map, s.state.get_lmp(), branch_map, cong_abs
5,351,246
def get_city_reviews(city):
    """
    Given a city name, return the data for all reviews.

    Returns a pandas DataFrame.
    """
    with open(f"{DATA_DIR}/{city}/review.json", "r") as f:
        review_list = []
        for line in f:
            review = json.loads(line)
            review_list.append(review)

    # convert to pandas DataFrame
    reviews = to_pandas([city], {city: review_list})

    # optimize memory usage
    reviews = optimize(reviews, {'city': 'category'})

    return reviews
5,351,247
def extract_rows_from_table(dataset, col_names, fill_null=False):
    """
    Extract rows from DB table.

    :param dataset:
    :param col_names:
    :param fill_null: if True, pad columns missing from the header row with empty strings.
    :return:
    """
    trans_dataset = transpose_list(dataset)
    rows = []
    if type(col_names).__name__ == 'str':
        col_names = [col_names]
    for col_name in col_names:
        if col_name in dataset[0]:
            idx = dataset[0].index(col_name)
            rows.append(trans_dataset[idx])
        else:
            if fill_null:
                null_list = [''] * (len(trans_dataset[0]) - 1)
                null_list = [col_name] + null_list
                rows.append(null_list)
            else:
                pass
    if len(col_names) == 1:
        return rows[0]
    else:
        return transpose_list(rows)
5,351,248
def CalculateHydrogenNumber(mol):
    """
    #################################################################
    Calculation of Number of Hydrogen in a molecule

    ---->nhyd

    Usage:

        result=CalculateHydrogenNumber(mol)

        Input: mol is a molecule object.

        Output: result is a numeric value.
    #################################################################
    """
    i = 0
    Hmol = Chem.AddHs(mol)
    for atom in Hmol.GetAtoms():
        if atom.GetAtomicNum() == 1:
            i = i + 1

    return i
5,351,249
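A brief usage sketch for CalculateHydrogenNumber above, assuming RDKit is installed (the function already relies on its Chem module).

from rdkit import Chem

# Ethanol (C2H5OH) has six hydrogens.
mol = Chem.MolFromSmiles('CCO')
print(CalculateHydrogenNumber(mol))  # 6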
def check_update():
    """Return the following values:
       (False, errmsg) - online version could not be determined
       (True, None) - user has newest version
       (True, (version, url string)) - update available
       (True, (version, None)) - current version is newer than online version
    """
    version, value = get_online_version()
    if version is None:
        # value is an error message
        return False, value
    if version == CurrentVersion:
        # user has newest version
        return True, None
    if is_newer_version(version):
        # value is an URL linking to the update package
        return True, (version, value)
    # user is running a local or development version
    return True, (version, None)
5,351,250
def draw_and_save(data_img_dir, img_fname, output, output_type, results_path, tfr_func=None):
    """Draws on the images for the detected object and saves them on the given location.

    Parameters:
    -----------
    data_img_dir (str): directory path where images are saved
    img_fname (str): name of an image stored in the <data_img_dir>
    output (tuple): (out_scores, out_boxes, out_classes) to draw those boxes of detected objects
    output_type (str): in ['true', 'pred']
    results_path (str): directory path where images are saved after drawing

    Returns:
    --------
    None
    """
    # Preprocess your image
    img_path = os.path.join(data_img_dir, img_fname)
    image = Image.open(img_path)
    image = np.array(image)  # dtype=np.float32
    # pdb.set_trace()
    if tfr_func is not None:
        image = tfr_func(image)
    image = Image.fromarray(image)

    out_scores, out_boxes, out_classes = output

    # Print predictions info
    class_names = yutil.get_class_names()
    colors = yutil.get_class_colors()  # generate_colors(class_names)

    # Draw bounding boxes on the image file
    yutil.draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    save_path = os.path.join(results_path, output_type + '_' + img_fname)
    image.save(save_path, quality=90)
    return
5,351,251
def count_regularization_baos_for_both(z, count_tokens, count_pieces, mask=None):
    """
    Compute regularization loss, based on a given rationale sequence
    Use Yujia's formulation

    Inputs:
        z -- torch variable, "binary" rationale, (batch_size, sequence_length)
        percentage -- the percentage of words to keep
    Outputs:
        a loss value that contains two parts:
        continuity_loss -- \sum_{i} | z_{i-1} - z_{i} |
        sparsity_loss -- |mean(z_{i}) - percent|
    """
    # (batch_size,)
    if mask is not None:
        mask_z = z * mask
        seq_lengths = torch.sum(mask, dim=1)
    else:
        mask_z = z
        seq_lengths = torch.sum(z - z + 1.0, dim=1)

    mask_z_ = torch.cat([mask_z[:, 1:], mask_z[:, -1:]], dim=-1)

    continuity_ratio = torch.sum(torch.abs(mask_z - mask_z_), dim=-1) / seq_lengths  # (batch_size,)
    percentage = count_pieces * 2 / seq_lengths
    # continuity_loss = F.threshold(continuity_ratio - percentage, 0, 0, False)
    continuity_loss = torch.abs(continuity_ratio - percentage)

    sparsity_ratio = torch.sum(mask_z, dim=-1) / seq_lengths  # (batch_size,)
    percentage = count_tokens / seq_lengths  # (batch_size,)
    # sparsity_loss = F.threshold(sparsity_ratio - percentage, 0, 0, False)
    sparsity_loss = torch.abs(sparsity_ratio - percentage)

    return continuity_loss, sparsity_loss
5,351,252
def main():
    """
    Example for solving CC-Lagrangian and then computing 1-RDM and 2-RDM
    """
    import pyscf
    import openfermion as of
    from openfermion.chem.molecular_data import spinorb_from_spatial
    from openfermionpyscf import run_pyscf
    from pyscf.cc.addons import spatial2spin
    from pyscf import cc
    import numpy as np

    np.set_printoptions(linewidth=500)
    basis = 'cc-pvdz'
    mol = pyscf.M(
        atom='H 0 0 0; F 0 0 {}'.format(1.6),
        basis=basis)

    mf = mol.RHF()
    mf.verbose = 3
    mf.run()
    mycc = mf.CCSD()
    mycc.conv_tol = 1.0E-12
    ecc, pyscf_t1, pyscf_t2 = mycc.kernel()
    print('CCSD correlation energy', mycc.e_corr)

    from functools import reduce
    from pyscf import ao2mo

    eris = mycc.ao2mo()
    conv, pyscf_l1, pyscf_l2 = cc.ccsd_lambda.kernel(mycc, eris, pyscf_t1, pyscf_t2,
                                                     tol=mycc.conv_tol)
    pyscf_sopdm = cc.ccsd_rdm.make_rdm1(mycc, pyscf_t1, pyscf_t2, pyscf_l1, pyscf_l2)
    pyscf_stpdm = cc.ccsd_rdm.make_rdm2(mycc, pyscf_t1, pyscf_t2, pyscf_l1, pyscf_l2)

    pyscf_t1s = spatial2spin(mycc.t1)
    pyscf_t2s = spatial2spin(mycc.t2)
    t1 = pyscf_t1s.transpose(1, 0)
    t2 = pyscf_t2s.transpose(2, 3, 0, 1)
    l1 = spatial2spin(pyscf_l1)
    l2 = spatial2spin(pyscf_l2)

    h1 = reduce(np.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
    eri = ao2mo.full(mf._eri, mf.mo_coeff)
    eri = ao2mo.restore(1, eri, h1.shape[0]).reshape((h1.shape[0],) * 4)
    e1 = np.einsum('pq,pq', h1, pyscf_sopdm)
    e2 = np.einsum('pqrs,pqrs', eri, pyscf_stpdm) * .5
    print(e1 + e2 + mol.energy_nuc() - mf.e_tot - ecc)
    print(e1 + e2 - mf.e_tot + mol.energy_nuc())

    molecule = of.MolecularData(geometry=[['H', (0, 0, 0)], ['F', (0, 0, 1.6)]],
                                basis=basis, charge=0, multiplicity=1)
    molecule = run_pyscf(molecule, run_ccsd=True)

    # oei, tei = molecule.get_integrals()
    oei, tei = h1, eri.transpose(0, 2, 3, 1)
    occ = mf.mo_occ
    nele = int(sum(occ))
    nocc = nele // 2
    norbs = oei.shape[0]
    nsvirt = 2 * (norbs - nocc)
    nsocc = 2 * nocc
    assert np.allclose(np.transpose(mycc.t2, [1, 0, 3, 2]), mycc.t2)

    soei, stei = spinorb_from_spatial(oei, tei)
    astei = np.einsum('ijkl', stei) - np.einsum('ijlk', stei)
    gtei = astei.transpose(0, 1, 3, 2)

    d1hf = np.diag([1.] * nsocc + [0.] * nsvirt)
    opdm = d1hf
    tpdm_wedge = 2 * of.wedge(opdm, opdm, (1, 1), (1, 1))
    rdm_energy = np.einsum('ij,ij', soei, opdm.real) + 0.25 * np.einsum('ijkl,ijkl', tpdm_wedge.real, astei)
    print(rdm_energy + mol.energy_nuc(), mf.e_tot)
    assert np.allclose(rdm_energy + mol.energy_nuc(), mf.e_tot)

    eps = np.kron(molecule.orbital_energies, np.ones(2))
    n = np.newaxis
    o = slice(None, nsocc)
    v = slice(nsocc, None)
    e_abij = 1 / (-eps[v, n, n, n] - eps[n, v, n, n] + eps[n, n, o, n] + eps[n, n, n, o])
    e_ai = 1 / (-eps[v, n] + eps[n, o])

    fock = soei + np.einsum('piiq->pq', astei[:, o, o, :])
    hf_energy = 0.5 * np.einsum('ii', (fock + soei)[o, o])
    hf_energy_test = 1.0 * einsum('ii', fock[o, o]) - 0.5 * einsum('ijij', gtei[o, o, o, o])
    print("HF energies")
    print(hf_energy, rdm_energy)
    assert np.isclose(hf_energy, mf.e_tot - molecule.nuclear_repulsion)
    assert np.isclose(hf_energy_test, hf_energy)

    g = gtei
    print("T1/2 from pyscf")
    print(np.linalg.norm(singles_residual(t1, t2, fock, g, o, v)))
    print(np.linalg.norm(doubles_residual(t1, t2, fock, g, o, v)))
    print("l1/2 from pyscf")
    print(np.linalg.norm(lambda_singles(t1, t2, l1, l2, fock, g, o, v)))
    print(np.linalg.norm(lambda_doubles(t1, t2, l1, l2, fock, g, o, v)))

    ncr_opdm = ccsd_d1(t1, t2, l1, l2, np.eye(2 * h1.shape[0]), o, v)
    opdm_a = ncr_opdm[::2, ::2]
    opdm_b = ncr_opdm[1::2, 1::2]
    opdm_s = (opdm_a + opdm_b + opdm_a.T + opdm_b.T) / 2
    print(opdm_s)
    print()
    print(pyscf_sopdm)
    print("1-RDM symm norm diff ", np.linalg.norm(opdm_s - pyscf_sopdm))

    t1z, t2z = np.zeros((nsvirt, nsocc)), np.zeros((nsvirt, nsvirt, nsocc, nsocc))
    t1f, t2f, l1f, l2f = kernel(t1z, t2z, fock, g, o, v, e_ai, e_abij,
                                stopping_eps=mycc.conv_tol, diis_size=8)
    print("Final Correlation Energy")
    print(ccsd_energy(t1f, t2f, fock, g, o, v) - hf_energy)
    print("Lagrangian Energy - HF")
    print(lagrangian_energy(t1f, t2f, l1f, l2f, fock, g, o, v) - hf_energy)
    print("diff from pyscf t1/t2", np.linalg.norm(t1f - t1), np.linalg.norm(t2f - t2))
    print("diff from pyscf l1/l2", np.linalg.norm(l1f - l1), np.linalg.norm(l2f - l2))

    d1hf = np.eye(2 * norbs)
    opdm = ccsd_d1(t1, t2, l1, l2, d1hf, o, v)
    tpdm = ccsd_d2(t1, t2, l1, l2, d1hf, o, v)
    tpdm = tpdm.transpose(0, 1, 3, 2)  # openfermion ordering

    opdm_a = opdm[::2, ::2]
    opdm_b = opdm[1::2, 1::2]
    opdm_s = (opdm_a + opdm_b + opdm_a.T + opdm_b.T) / 2
    print(opdm_s)
    print()
    print(pyscf_sopdm)
    print("1-RDM symm norm diff ", np.linalg.norm(opdm_s - pyscf_sopdm))

    rdm_energy = np.einsum('ij,ij', soei, opdm) + 0.25 * np.einsum('ijkl,ijkl', tpdm, astei)
    print("Correlation Energy from RDMs")
    print(rdm_energy - hf_energy)
5,351,253
def unsqueeze_samples(x, n):
    """Reshape a flat (b*n, d) batch into (b, n, d) groups of n samples."""
    bn, d = x.shape
    x = x.reshape(bn // n, n, d)
    return x
5,351,254
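A tiny usage sketch for unsqueeze_samples above. It is typically applied to torch tensors, but a NumPy array works equally well here since only .shape and .reshape are used.

import numpy as np

# Six rows of 4 features, grouped into blocks of 3 samples.
x = np.arange(24).reshape(6, 4)
print(unsqueeze_samples(x, 3).shape)  # (2, 3, 4)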
def f_snr(seq):
    """compute signal to noise rate of a seq

    Args:
        seq: input array_like sequence
    paras:
        paras array, in this case should be "axis"
    """
    seq = np.array(seq, dtype=np.float64)
    result = np.mean(seq) / float(np.std(seq))
    if np.isinf(result):
        print("marker")
        result = 0
    return result
5,351,255
def prepare_dataset_with_flair(args, data_path, tokenizer, pos_tokenizer):
    """Use the flair-tagged dirty files and the tokenizer built from the vocabulary
    to convert tokens and POS tags into token_ids and pos_ids, and build pos2word.

    Args:
        write_path ([string]): [path to write the vocab]
    """
    pos2word = [set() for i in range(len(pos_tokenizer.tag_vocab))]
    # tokenizer.vocab_size+2 includes unk and pad
    token_in_pos_id = np.zeros((len(pos_tokenizer.tag_vocab), tokenizer.vocab_size + 2), dtype=np.int32)
    # token_counter is used to compute token frequencies, which is needed for F2-softmax
    token_counter = collections.Counter()
    for split, fname in flair_dirty_files.items():
        logging.info("Prepare {} file.".format(split))
        dirty_file = open(fname, 'r')
        all_token_list = []
        all_pos_list = []
        for idx, line in enumerate(dirty_file):
            token_pos_list = line.strip().split(" ")
            if len(token_pos_list) != 1 and not tagging_list_is_caption(token_pos_list):
                for token_idx in range(0, len(token_pos_list), 2):
                    token = token_pos_list[token_idx]
                    pos = token_pos_list[token_idx + 1].split(">")[0].split("<")[-1]
                    token_id = tokenizer.convert_word_to_id(token)
                    token_counter[token_id] += 1
                    pos_id = pos_tokenizer.convert_tag_to_id(pos)
                    all_token_list.append(token_id)
                    all_pos_list.append(pos_id)
                    # unk must not be excluded from pos2word, because unk also appears in y
                    # if token_id != tokenizer.unk_id:
                    pos2word[pos_id].add(token_id)
            if idx % 1000 == 0:
                logging.info("Finish preparing {} lines.".format(idx))
        pickle.dump(all_token_list, open(os.path.join(data_path, 'flair_{split}_{size}.token'.format(split=split, size=args.vocab_size)), 'wb'))
        pickle.dump(all_pos_list, open(os.path.join(data_path, 'flair_{split}_{size}.pos'.format(split=split, size=args.vocab_size)), 'wb'))

    # For token_ids that are in the vocab but never occur in the files, set the count to 1
    # for id in range(tokenizer.vocab_size):
    #     if id not in token_counter:
    #         token_counter[id] = 1

    tot = 0
    cum_prob = [0]
    for i in token_counter.most_common():
        tot += i[1]
    # cum_prob holds the cumulative token frequencies
    for i in token_counter.most_common():
        cum_prob.append(cum_prob[-1] + i[1] / tot)
    cum_prob.pop(0)  # remove the first element

    # token2order_dict maps {token_id: rank of the token by descending frequency (starting at 0)}
    token2order_dict = dict([(int(token_count[0]), int(idx)) for (idx, token_count) in enumerate(token_counter.most_common())])

    pickle.dump(cum_prob, open(os.path.join(data_path, 'flair_{size}_probs.pkl'.format(size=args.vocab_size)), 'wb'))
    pickle.dump(token2order_dict, open(os.path.join(data_path, 'flair_{size}_token2order.pkl'.format(size=args.vocab_size)), 'wb'))

    pos2word = np.array([np.array(list(i)) for i in pos2word])
    for pos_id, pos_i_vocab in enumerate(pos2word):
        for token_in_pos_i_id, token_id in enumerate(pos_i_vocab):
            token_in_pos_id[pos_id][token_id] = token_in_pos_i_id

    with open(os.path.join(data_path, 'flair_{size}_pos2word.pkl'.format(size=args.vocab_size)), "wb") as writer:
        pickle.dump(pos2word, writer)
    with open(os.path.join(data_path, 'flair_{size}_token_in_pos_id.pkl'.format(size=args.vocab_size)), "wb") as writer:
        pickle.dump(token_in_pos_id, writer)
5,351,256
def main():
    """Runs label studio server using given config."""
    global input_args

    input_args = parse_input_args()
    server.input_args = input_args

    # setup logging level
    if input_args.log_level:
        print(f"log level is {input_args.log_level}")
        logging.root.setLevel(input_args.log_level)

    label_studio.utils.functions.HOSTNAME = 'http://localhost:' + str(input_args.port)

    if input_args.command != 'start-multi-session':
        exit("Only multi user session is supported!")

    # Lets start the server
    app.run(host='0.0.0.0', port=input_args.port, debug=input_args.debug)
5,351,257
def _lorentzian_pink_beam(p, x):
    """
    @author Saransh Singh, Lawrence Livermore National Lab
    @date 03/22/2021 SS 1.0 original
    @details the lorentzian component of the pink beam peak profile
    obtained by convolution of gaussian with normalized back to back
    exponentials. more details can be found in
    Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3-6

    p has the following parameters
    p = [A, x0, alpha, beta, fwhm_l]
    """
    A, x0, alpha, beta, fwhm_l = p

    del_tth = x - x0
    p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l
    q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l

    y = np.zeros(x.shape)
    f1 = exp1exp(p)
    f2 = exp1exp(q)

    y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag

    mask = np.isnan(y)
    y[mask] = 0.
    y *= A

    return y
5,351,258
def get_q_HPU_ave(Q_HPU):
    """Hourly average heating output of the heat pump unit (Eq. 7)

    Args:
        Q_HPU(ndarray): hourly heating output of the heat pump unit (MJ/h)

    Returns:
        ndarray: hourly average heating output of the heat pump unit (Eq. 7)
    """
    return Q_HPU * 10 ** 6 / 3600
5,351,259
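A quick numeric check of get_q_HPU_ave above: the factor 10**6 / 3600 converts an hourly energy in MJ/h into an average power in W.

import numpy as np

Q_HPU = np.array([3.6, 7.2])   # MJ delivered per hour
print(get_q_HPU_ave(Q_HPU))    # [1000. 2000.] -> average output in W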
def run(self):
    """Run the Electrical module"""
    if self.parent is None:
        raise InputError("The Electrical object must be in a Simulation object to run")
    if self.parent.parent is None:
        raise InputError("The Simulation object must be in an Output object to run")

    self.get_logger().info("Starting Electric module")

    output = self.parent.parent
    machine = output.simu.machine

    if self.eec is None:
        # Init EEC depending on machine type
        if isinstance(machine, MachineSCIM):
            self.eec = EEC_SCIM()
        elif isinstance(machine, (MachineSIPMSM, MachineIPMSM)):
            self.eec = EEC_PMSM()
    else:
        # Check that EEC is consistent with machine type
        if isinstance(machine, MachineSCIM) and (
            not isinstance(self.eec, EEC_SCIM) and not isinstance(self.eec, EEC_ANL)
        ):
            raise Exception(
                "Cannot run Electrical model if machine is SCIM and eec is not EEC_SCIM or EEC_ANL"
            )
        elif isinstance(machine, (MachineSIPMSM, MachineIPMSM)) and (
            not isinstance(self.eec, EEC_PMSM) and not isinstance(self.eec, EEC_ANL)
        ):
            raise Exception(
                "Cannot run Electrical model if machine is PMSM and eec is not EEC_PMSM or EEC_ANL"
            )

    if self.ELUT_enforced is not None:
        # enforce parameters of EEC coming from enforced ELUT at right temperatures
        if self.eec.parameters is None:
            self.eec.parameters = dict()
        self.eec.parameters.update(self.ELUT_enforced.get_param_dict(OP=output.elec.OP))

    # Generate drive
    # self.eec.gen_drive(output)
    # self.eec.parameters["U0_ref"] = output.elec.U0_ref
    # self.eec.parameters["Ud_ref"] = output.elec.OP.get_Ud_Uq()["Ud"]
    # self.eec.parameters["Uq_ref"] = output.elec.OP.get_Ud_Uq()["Uq"]

    # Compute parameters of the electrical equivalent circuit if some parameters are missing in ELUT
    self.eec.comp_parameters(
        machine,
        OP=output.elec.OP,
        Tsta=self.Tsta,
        Trot=self.Trot,
    )

    # Solve the electrical equivalent circuit
    out_dict = self.eec.solve_EEC()

    # Solve for each harmonic in case of Us_PWM
    out_dict_harm = dict()
    if output.elec.Us_PWM is not None:
        Us_harm = output.elec.get_Us_harm()
        result = Us_harm.get_along("freqs", "phase")
        Udqh = result[Us_harm.symbol]
        freqs = result["freqs"].tolist()
        Is_harm = zeros((len(freqs), machine.stator.winding.qs), dtype=complex)
        # Remove Id/Iq from eec parameters
        del self.eec.parameters["Id"]
        del self.eec.parameters["Iq"]
        for i, f in enumerate(freqs):
            # Update eec parameters
            self.eec.freq0 = f
            self.eec.parameters["Ud"] = Udqh[i, 0]
            self.eec.parameters["Uq"] = Udqh[i, 1]
            # Solve eec
            out_dict_i = self.eec.solve_EEC()
            Is_harm[i, :] = array([out_dict_i["Id"], out_dict_i["Iq"], 0])
        out_dict_harm["Is_harm"] = Is_harm
        out_dict_harm["axes_list"] = Us_harm.get_axes()

    # Compute losses due to Joule effects
    out_dict = self.eec.comp_joule_losses(out_dict, machine)

    # Compute electromagnetic power
    out_dict = self.comp_power(out_dict, machine)

    # Compute torque
    self.comp_torque(out_dict, output.elec.OP.get_N0())

    # Store electrical quantities contained in out_dict in OutElec, as Data object if necessary
    output.elec.store(out_dict, out_dict_harm)
5,351,260
def android_example():
    """A basic example of how to use the android agent."""
    env = holodeck.make("AndroidPlayground")

    # The Android's command is a 94 length vector representing torques to be applied at each of his joints
    command = np.ones(94) * 10

    for i in range(10):
        env.reset()
        for j in range(1000):
            if j % 50 == 0:
                command *= -1
            state, reward, terminal, _ = env.step(command)

            # To access specific sensor data:
            pixels = state[Sensors.PIXEL_CAMERA]
            orientation = state[Sensors.ORIENTATION_SENSOR]

            # For a full list of sensors the android has, view the README
5,351,261
def create_table_ISI():
    """ create ISI (inter spike interval) table """
    commands = [
        """
        CREATE TABLE ISI_tb (
            cluster_no SMALLINT NOT NULL,
            analysis_ts TIMESTAMP NOT NULL,
            tetrode_no SMALLINT NOT NULL,
            session_name VARCHAR NOT NULL,
            filename VARCHAR NOT NULL,
            n_drive_user VARCHAR NOT NULL,
            animal_id VARCHAR NOT NULL,
            session_ts TIMESTAMP NOT NULL,
            PRIMARY KEY (cluster_no, tetrode_no, session_name, n_drive_user, animal_id, session_ts),
            FOREIGN KEY (cluster_no, tetrode_no, n_drive_user, animal_id, session_ts)
                REFERENCES clusters_tb (cluster_no, tetrode_no, n_drive_user, animal_id, session_ts)
                ON DELETE CASCADE ON UPDATE CASCADE,
            hist_ISI BYTEA NOT NULL,
            bin_edges_ISI BYTEA NOT NULL,
            ISI_stats_contam DOUBLE PRECISION NOT NULL,
            ISI_stats_contam_perc DOUBLE PRECISION NOT NULL,
            ISI_stats_percent_bursts DOUBLE PRECISION NOT NULL
        )
        """]
    conn = None
    try:
        params = config()
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        for command in commands:
            print(command)
            cur.execute(command)
        cur.close()
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
5,351,262
def particles(t1cat):
    """Return a list of the particles in a T1 catalog DataFrame.

    Use it to find the individual particles involved in a group of events."""
    return particles_fromlist(t1cat.particles.tolist())
5,351,263
def interp_coeff_lambda3(i2, dx2, nx):
    """ NOTE: input and output index from 0 to nx-1 !!! """
    i2 = i2 + 1  # TODO, waiting for script to be updated
    # Find index of other cells
    i1 = i2 - 1
    i3 = i2 + 1
    i4 = i2 + 2
    # Find normalised distance to other cells
    dx1 = dx2 + 1.0
    dx3 = 1.0 - dx2
    dx4 = 2.0 - dx2
    # lambda 3 kernel
    ax1 = 1.0 / 6.0 * (1.0 - dx1) * (2.0 - dx1) * (3.0 - dx1)
    ax2 = 1.0 / 2.0 * (1 - dx2 ** 2) * (2 - dx2)
    ax3 = 1.0 / 2.0 * (1 - dx3 ** 2) * (2 - dx3)
    ax4 = 1.0 / 6.0 * (1.0 - dx4) * (2.0 - dx4) * (3.0 - dx4)
    if i2 == nx - 1:
        i1, i2, i3, i4 = 1, 1, 1, 1
        ax1, ax2, ax3, ax4 = 0., 0., 0., 0.
    elif i2 == 1:
        i1, i2, i3, i4 = 1, 1, 1, 1
        ax1, ax2, ax3, ax4 = 0., 0., 0., 0.
    elif i2 < 1:  # Should not happen
        i1, i2, i3, i4 = 1, 1, 1, 1
        ax1, ax2, ax3, ax4 = 0., 0., 0., 0.
    elif (i2 > nx - 1):  # Should not happen
        i1, i2, i3, i4 = 1, 1, 1, 1
        ax1, ax2, ax3, ax4 = 0., 0., 0., 0.
    elif i1 <= 0 or i2 <= 0:  # Might happen if on grid
        i1, i2, i3, i4 = 1, 1, 1, 1
        ax1, ax2, ax3, ax4 = 0., 0., 0., 0.
    elif i4 > nx or i3 > nx:  # Might happen if on grid
        i1, i2, i3, i4 = 1, 1, 1, 1
        ax1, ax2, ax3, ax4 = 0., 0., 0., 0.
    return ax1, ax2, ax3, ax4, i1 - 1, i2 - 1, i3 - 1, i4 - 1
5,351,264
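A small numeric check of the lambda-3 kernel above: for an interior cell the four interpolation weights sum to 1, and the returned neighbour indices are the four surrounding cells (0-based).

ax1, ax2, ax3, ax4, i1, i2, i3, i4 = interp_coeff_lambda3(5, 0.3, 20)
print(round(ax1 + ax2 + ax3 + ax4, 12))  # 1.0
print(i1, i2, i3, i4)                    # 4 5 6 7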
def prime_list(num):
    """
    This function returns a list of prime numbers less than natural number entered.

    :param num: natural number
    :return result: List of primes less than natural number entered
    """
    prime_table = [True for _ in range(num + 1)]
    i = 2
    while i ** 2 <= num:
        if prime_table[i]:
            j = i + i
            while j <= num:
                prime_table[j] = False
                j += i
        i += 1
    result = [i for i in range(num) if prime_table[i] and i >= 2]
    return result
5,351,265
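A short usage sketch of the sieve above:

print(prime_list(20))  # [2, 3, 5, 7, 11, 13, 17, 19]
print(prime_list(2))   # [] -- primes strictly less than 2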
def check_deadline_exceeded_and_store_partial_minimized_testcase(
        deadline, testcase_id, job_type, input_directory, file_list,
        file_to_run_data, main_file_path):
    """Store the partially minimized test and check the deadline."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    store_minimized_testcase(testcase, input_directory, file_list,
                             file_to_run_data, main_file_path)

    deadline_exceeded = time.time() > deadline
    if deadline_exceeded:
        attempts = testcase.get_metadata(
            'minimization_deadline_exceeded_attempts', default=0)
        if attempts >= MAX_DEADLINE_EXCEEDED_ATTEMPTS:
            _skip_minimization(testcase,
                               'Exceeded minimization deadline too many times.')
        else:
            testcase.set_metadata('minimization_deadline_exceeded_attempts',
                                  attempts + 1)
            tasks.add_task('minimize', testcase_id, job_type)

    return deadline_exceeded
5,351,266
def _item_to_python_repr(item, definitions):
    """Converts the given Capirca item into a typed Python object."""
    # Capirca comments are just appended to item strings
    s = item.split("#")[0].strip()

    # A reference to another network
    if s in definitions.networks:
        return s

    # IPv4 address / network
    try:
        return ipaddress.IPv4Address(s)
    except ValueError:
        pass
    try:
        return ipaddress.IPv4Network(s, strict=False)
    except ValueError:
        pass

    # IPv6 address / network
    try:
        return ipaddress.IPv6Address(s)
    except ValueError:
        pass
    try:
        return ipaddress.IPv6Network(s, strict=False)
    except ValueError:
        pass

    raise ValueError("Unknown how to convert {s}".format(s=s))
5,351,267
def test_resolve_interpreter_with_nonexistent_interpreter(mock_exists):
    """Should SystemExit with a nonexistent python interpreter path"""
    mock_exists.return_value = False

    with pytest.raises(SystemExit):
        virtualenv.resolve_interpreter("/usr/bin/python53")

    mock_exists.assert_called_with("/usr/bin/python53")
5,351,268
def floor_divide(x1, x2, out=None, where=True, **kwargs):
    """
    Return the largest integer smaller or equal to the division of the inputs.

    It is equivalent to the Python ``//`` operator and pairs with the
    Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)``
    up to roundoff.

    Args:
        x1 (numpoly.ndpoly):
            Numerator.
        x2 (numpoly.ndpoly):
            Denominator. If ``x1.shape != x2.shape``, they must be
            broadcastable to a common shape (which becomes the shape of the
            output).
        out (Optional[numpy.ndarray]):
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or
            `None`, a freshly-allocated array is returned. A tuple (possible
            only as a keyword argument) must have length equal to the number
            of outputs.
        where (Optional[numpy.ndarray]):
            This condition is broadcast over the input. At locations where the
            condition is True, the `out` array will be set to the ufunc
            result. Elsewhere, the `out` array will retain its original value.
            Note that if an uninitialized `out` array is created via the
            default ``out=None``, locations within it where the condition is
            False will remain uninitialized.
        kwargs:
            Keyword args passed to numpy.ufunc.

    Returns:
        (numpoly.ndpoly):
            This is a scalar if both `x1` and `x2` are scalars.

    Examples:
        >>> xyz = [1, 2, 4]*numpoly.symbols("x y z")
        >>> numpoly.floor_divide(xyz, 2.)
        polynomial([0.0, y, 2.0*z])
        >>> numpoly.floor_divide(xyz, [1, 2, 4])
        polynomial([x, y, z])
        >>> numpoly.floor_divide([1, 2, 4], xyz)
        Traceback (most recent call last):
            ...
        ValueError: only constant polynomials can be converted to array.

    """
    x1, x2 = numpoly.align_polynomials(x1, x2)
    x2 = x2.tonumpy()
    no_output = out is None
    if no_output:
        out = numpoly.ndpoly(
            exponents=x1.exponents,
            shape=x1.shape,
            names=x1.indeterminants,
            dtype=numpy.common_type(x1, numpy.array(1.)),
        )
    for key in x1.keys:
        numpy.floor_divide(x1[key], x2, out=out[key], where=where, **kwargs)
    if no_output:
        out = numpoly.clean_attributes(out)
    return out
5,351,269
def node_args_argument(command: Callable[..., None]) -> Callable[..., None]: """ Decorate a function to allow choosing arguments to run on a node. """ function = click.argument( 'node_args', type=str, nargs=-1, required=True, )(command) # type: Callable[..., None] return function
5,351,270
def _tag_error(func): """Decorates a unittest test function to add failure information to the TestCase.""" @functools.wraps(func) def decorator(self, *args, **kwargs): """Add failure information to `self` when `func` raises an exception.""" self.test_failed = False try: func(self, *args, **kwargs) except unittest.SkipTest: raise except Exception: # pylint: disable=broad-except self.test_failed = True raise # re-raise the error with the original traceback. return decorator
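# Hedged usage sketch for _tag_error: the TestCase below is illustrative; it only shows
# that a decorated test sets `self.test_failed` before the assertion error propagates.
# functools and unittest are imported here because the decorator itself relies on them.
import functools
import unittest

class _ExampleTest(unittest.TestCase):
    @_tag_error
    def test_fails(self):
        self.assertEqual(1, 2)  # raises AssertionError; the decorator flags the failure first

suite = unittest.defaultTestLoader.loadTestsFromTestCase(_ExampleTest)
unittest.TextTestRunner(verbosity=0).run(suite)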
5,351,271
def create_drizzle_products(total_obj_list, custom_limits=None):
    """
    Run astrodrizzle to produce products specified in the total_obj_list.

    Parameters
    ----------
    total_obj_list: list
        List of TotalProduct objects, one object per instrument/detector combination
        in a visit. The TotalProduct objects are comprised of FilterProduct and
        ExposureProduct objects.

    custom_limits : list, optional
        4-element list containing the mosaic bounding rectangle X min and max and
        Y min and max values for custom mosaics

    Returns
    -------
    product_list: list
        A list of output products
    """
    # Get rules files
    rules_files = {}

    log.info("Processing with astrodrizzle version {}".format(drizzlepac.astrodrizzle.__version__))

    # Generate list of all input exposure filenames that are to be processed
    edp_names = []
    for t in total_obj_list:
        edp_names += [e.full_filename for e in t.edp_list]

    # Define dataset-specific rules filenames for each input exposure
    for imgname in edp_names:
        rules_files[imgname] = proc_utils.get_rules_file(imgname)

    print('Generated RULES_FILE names of: \n{}\n'.format(rules_files))

    # Keep track of all the products created for the output manifest
    product_list = []

    # For each detector (as the total detection products are instrument- and detector-specific),
    # create the drizzle-combined filtered image, the drizzled exposure (aka single) images,
    # and finally the drizzle-combined total detection image.
    for filt_obj in total_obj_list:
        filt_obj.rules_file = rules_files[filt_obj.edp_list[0].full_filename]

        log.info("~" * 118)
        # Get the common WCS for all images which are part of a total detection product,
        # where the total detection product is detector-dependent.
        meta_wcs = filt_obj.generate_metawcs(custom_limits=custom_limits)

        log.info("CREATE DRIZZLE-COMBINED FILTER IMAGE: {}\n".format(filt_obj.drizzle_filename))
        filt_obj.wcs_drizzle_product(meta_wcs)
        product_list.append(filt_obj.drizzle_filename)
        product_list.append(filt_obj.trl_filename)

        # Add individual single input images with updated WCS headers to manifest
        for exposure_obj in filt_obj.edp_list:
            product_list.append(exposure_obj.full_filename)

            # Create Drizzled images for each input on SkyCell pixels
            exposure_obj.wcs_drizzle_product(meta_wcs)

            # Add drizzled FLC images to manifest
            product_list.append(exposure_obj.drizzle_filename)
            product_list.append(exposure_obj.trl_filename)

    # Ensure that all drizzled products have headers that are to specification
    try:
        log.info("Updating these drizzle products for CAOM compatibility:")
        fits_files = fnmatch.filter(product_list, "*dr?.fits")
        for filename in fits_files:
            log.info("    {}".format(filename))
            proc_utils.refine_product_headers(filename, total_obj_list)
    except Exception:
        log.critical("Trouble updating drizzle products for CAOM.")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")

    # Remove rules files copied to the current working directory
    for rules_filename in list(rules_files.values()):
        log.info("Removing rules file {}".format(rules_filename))
        os.remove(rules_filename)

    # Add primary header information to all objects
    for filt_obj in total_obj_list:
        filt_obj = poller_utils.add_primary_fits_header_as_attr(filt_obj)

    # Return product list for creation of pipeline manifest file
    return product_list
5,351,272
def get_submission_praw(n, sub, n_num): """ Returns a list of results for submission in past: 1st list: current result from n hours ago until now 2nd list: prev result from 2n hours ago until n hours ago """ mid_interval = datetime.today() - timedelta(hours=n) timestamp_mid = int(mid_interval.timestamp()) timestamp_start = int((mid_interval - timedelta(hours=n)).timestamp()) timestamp_end = int(datetime.today().timestamp()) recent = {} prev = {} subreddit = reddit.subreddit(sub) all_results = [] for post in subreddit.new(limit=n_num): all_results.append([post.title, post.link_flair_text, post.selftext, post.score, post.num_comments, post.created_utc]) # start --> mid --> end recent[sub] = [posts for posts in all_results if timestamp_mid <= posts[5] <= timestamp_end] prev[sub] = [posts for posts in all_results if timestamp_start <= posts[5] < timestamp_mid] return recent, prev
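# Hedged usage sketch for get_submission_praw: the function reads a module-level
# authenticated `reddit` (praw.Reddit) instance and assumes datetime/timedelta are
# imported in its module; the credential strings below are placeholders and the
# subreddit and window sizes are arbitrary.
import praw

reddit = praw.Reddit(client_id="...", client_secret="...", user_agent="trend-monitor")
recent, prev = get_submission_praw(n=6, sub="python", n_num=200)
print(len(recent["python"]), "posts in the last 6 h,", len(prev["python"]), "in the 6 h before")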
5,351,273
def preprocess(dframe, log_dir, log_name): """ Convert date type and save to disk""" dframe['date'] = lookup_date(dframe['date']) dframe.to_csv(os.path.join(log_dir, log_name + ".csv_preprocessed"))
5,351,274
def memory_kernel_logspace(dt, coeffs, dim_x, noDirac=False): """ Return the value of the estimated memory kernel Parameters ---------- dt: Timestep coeffs : Coefficients for diffusion and friction dim_x: Dimension of visible variables noDirac: Remove the dirac at time zero Returns ------- timespan : array-like, shape (n_samples, ) Array of time to evaluate memory kernel kernel_evaluated : array-like, shape (n_samples, dim_x,dim_x) Array of values of the kernel at time provided """ Avv = coeffs["A"][:dim_x, :dim_x] Ahv = coeffs["A"][dim_x:, :dim_x] Avh = coeffs["A"][:dim_x, dim_x:] Ahh = coeffs["A"][dim_x:, dim_x:] eigs = np.linalg.eigvals(Ahh) Kernel = np.zeros((150, dim_x, dim_x)) final_time = 25 / np.min(np.abs(np.real(eigs))) times = np.logspace(np.log10(dt), np.log10(final_time), num=150) for n, t in enumerate(times): Kernel[n, :, :] = -np.matmul(Avh, np.matmul(scipy.linalg.expm(-1 * t * Ahh), Ahv)) if not noDirac: Kernel[0, :, :] = Kernel[0, :, :] + Avv return times, Kernel
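# Hedged usage sketch for memory_kernel_logspace: `coeffs` is assumed to be a dict whose
# "A" entry is the full (visible + hidden) drift matrix; the 2x2 matrix below is made up,
# with one visible and one hidden degree of freedom. numpy and scipy.linalg are assumed
# to be imported by the surrounding module, as the function itself requires.
import numpy as np

A = np.array([[0.5, 1.0],
              [-1.0, 2.0]])
times, kernel = memory_kernel_logspace(dt=1e-3, coeffs={"A": A}, dim_x=1)
print(times.shape, kernel.shape)  # (150,) (150, 1, 1)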
5,351,275
def create_tables(cur, conn): """ Creates each table using the queries in `create_table_queries` list. Returns: None """ for query in create_table_queries: cur.execute(query) conn.commit()
5,351,276
async def aclose_forcefully(resource: AsyncResource) -> None: """ Close an asynchronous resource in a cancelled scope. Doing this closes the resource without waiting on anything. :param resource: the resource to close """ async with open_cancel_scope() as scope: await scope.cancel() await resource.aclose()
5,351,277
def _is_constant(x, atol=1e-7, positive=None): """ True if x is a constant array, within atol """ x = np.asarray(x) return (np.max(np.abs(x - x[0])) < atol and (np.all((x > 0) == positive) if positive is not None else True))
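# Quick sanity checks for _is_constant; a sketch that only needs numpy, which the
# function itself already relies on as `np`.
import numpy as np

assert _is_constant(np.ones(5))
assert not _is_constant(np.array([1.0, 1.1]))
assert _is_constant(np.full(4, 2.0), positive=True)       # constant and positive
assert not _is_constant(np.full(4, -2.0), positive=True)  # constant but not positive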
5,351,278
def load_db_data_from_json(initial_database_data: pathlib.Path) -> None:
    """Load database data from a JSON file.

    Args:
        initial_database_data (Path): JSON file.
    """
    with open(initial_database_data, "r", encoding=cfg.glob.FILE_ENCODING_DEFAULT) as json_file:
        json_data = json.load(json_file)

        api_version = json_data[cfg.glob.JSON_NAME_API_VERSION]
        if api_version != cfg.glob.setup.dcr_version:
            utils.terminate_fatal(f"Expected api version is '{cfg.glob.setup.dcr_version}' "
                                  f"- got '{api_version}'")

        data = json_data[cfg.glob.JSON_NAME_DATA]
        for json_table in data[cfg.glob.JSON_NAME_TABLES]:
            table_name = json_table[cfg.glob.JSON_NAME_TABLE_NAME].lower()

            if table_name not in ["language"]:
                if table_name in [
                    "content_tetml_line",
                    "content_tetml_page",
                    "content_tetml_word",
                    "content_token",
                    "document",
                    "run",
                    "version",
                ]:
                    utils.terminate_fatal(f"The database table '{table_name}' must not be changed via the JSON file.")
                else:
                    utils.terminate_fatal(f"The database table '{table_name}' does not exist in the database.")

            for json_row in json_table[cfg.glob.JSON_NAME_ROWS]:
                db_columns = {}

                for json_column in json_row[cfg.glob.JSON_NAME_ROW]:
                    db_columns[json_column[cfg.glob.JSON_NAME_COLUMN_NAME]] = json_column[
                        cfg.glob.JSON_NAME_COLUMN_VALUE
                    ]

                db.dml.insert_dbt_row(
                    table_name,
                    db_columns,
                )
5,351,279
def test_is_valid_password_v2_false2(): """ Test of is_valid_password_v2() with a false example, take 2 """ result = is_valid_password_v2( {"low": 1, "high": 2, "letter": "w", "password": "aa"} ) assert not result
5,351,280
def estimate_M(X, estimator, B, ratio): """Estimating M with Block or incomplete U-statistics estimator :param B: Block size :param ratio: size of incomplete U-statistics estimator """ p = X.shape[1] x_bw = util.meddistance(X, subsample = 1000)**2 kx = kernel.KGauss(x_bw) if estimator == 'inc': hsic_M = hsic.HSIC_Inc(kx, kx, ratio = ratio) else: # 'block' hsic_M = hsic.HSIC_Block(kx, kx, bsize = B) M_true = np.zeros((p, p)) for i in range(p): for j in range(i+1): M_true[i, j] = np.mean(hsic_M.estimates(X[:, i, np.newaxis], X[:, j, np.newaxis])) M_true[j, i] = M_true[i, j] M = nearestPD(M_true) # positive definite approximation return M_true, M
5,351,281
def load_nf_conntrack(): """ Try to force the nf_conntrack_netlink kernel module to be loaded. """ _log.info("Running conntrack command to force load of " "nf_conntrack_netlink module.") try: # Run a conntrack command to trigger it to load the kernel module if # it's not already compiled in. We list rules with a randomly-chosen # link local address. That makes it very unlikely that we generate # any wasteful output. We used to use "-S" (show stats) here but it # seems to be bugged on some platforms, generating an error. futils.check_call(["conntrack", "-L", "-s", "169.254.45.169"]) except FailedSystemCall: _log.exception("Failed to execute conntrack command to force load of " "nf_conntrack_netlink module. conntrack commands may " "fail later.")
5,351,282
def query_yes_no(question, default="yes"): """Queries user for confimration""" valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("invalid default answer: '%s'" % default) while True: console.print(question + escape(prompt)) choice = input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: console.print("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
5,351,283
def decode(password, encoded, notice): """ :type password: str :type encoded: str """ dec = [] try: encoded_bytes = base64.urlsafe_b64decode(encoded.encode()).decode() except binascii.Error: notice("Invalid input '{}'".format(encoded)) return for i in range(len(encoded_bytes)): key_c = password[i % len(password)] dec_c = chr((256 + ord(encoded_bytes[i]) - ord(key_c)) % 256) dec.append(dec_c) return "".join(dec)
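# Hedged round-trip sketch for decode: the matching encoder is not part of this snippet,
# so a minimal counterpart is reconstructed here from decode's own arithmetic; the names
# are illustrative, `notice` is just a callable used for error reporting, and base64 /
# binascii are assumed to be imported by decode's module.
import base64

def _encode(password, clear):
    # Shift each character by the corresponding password character, then base64-encode.
    enc = [chr((ord(c) + ord(password[i % len(password)])) % 256)
           for i, c in enumerate(clear)]
    return base64.urlsafe_b64encode("".join(enc).encode()).decode()

token = _encode("hunter2", "secret message")
assert decode("hunter2", token, notice=print) == "secret message"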
5,351,284
def read_file_unlabelled_data(file_name):
    """
    read_file_unlabelled_data reads from file_name
    These files are to be in csv-format with one token per line (see the example project).
    returns text_vector:
    Ex: [['7_7', 'perhaps', 'there', 'is', 'a_a', 'better', 'way', '._.'], ['2_2', 'Why', 'are', 'you', 'doing', 'it', '._.']]
    """
    # Read file, to get text, grouped into sentences
    text_vector = []
    current_text = []
    f = open(file_name)
    for line in f:
        word = line.strip()
        if word != "":
            if len(word) == 1:
                word = word + "_" + word  # to cover for a bug in scikit learn's tokenization
            current_text.append(word)
        else:
            if len(current_text) != 0:  # end of sentence
                text_vector.append(current_text)
            current_text = []
    if len(current_text) != 0:  # the last sentence
        text_vector.append(current_text)
    f.close()
    return text_vector
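# Hedged usage sketch for read_file_unlabelled_data: writes a tiny one-token-per-line
# file (an empty line marks a sentence boundary) and reads it back; the file name is
# arbitrary.
with open("unlabelled_sample.txt", "w") as sample:
    sample.write("perhaps\nthere\nis\na\nbetter\nway\n.\n\nwhy\nnot\n?\n")

print(read_file_unlabelled_data("unlabelled_sample.txt"))
# [['perhaps', 'there', 'is', 'a_a', 'better', 'way', '._.'], ['why', 'not', '?_?']]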
5,351,285
def replace(data, replacements): """ Allows to performs several string substitutions. This function performs several string substitutions on the initial ``data`` string using a list of 2-tuples (old, new) defining substitutions and returns the resulting string. """ return reduce(lambda a, kv: a.replace(*kv), replacements, data)
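# Hedged usage sketch for replace: the helper relies on functools.reduce being available
# in its module under the name `reduce`, so it is imported here as well.
from functools import reduce

print(replace("Hello world", [("Hello", "Goodbye"), ("world", "moon")]))
# Goodbye moon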
5,351,286
def fake_kafka() -> FakeKafka: """Fixture for fake kafka.""" return FakeKafka()
5,351,287
def rr20(prec: pd.Series) -> Union[float, int]:
    """Count days of heavy precipitation (days where rr is greater than or equal to 20 mm)

    Args:
        prec (pd.Series): value array of precipitation

    Returns:
        np.nan or number: the count of heavy precipitation days
    """
    assert isinstance(prec, pd.Series)
    op = operator.ge
    num = 20.0
    return number_of(prec, num, op)
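# Hedged usage sketch for rr20 with a small synthetic daily precipitation series (mm);
# the `number_of` helper and the `operator` import are assumed to come from the
# surrounding module, as the function requires.
import pandas as pd

prec = pd.Series([0.0, 5.5, 21.0, 19.9, 35.2])
print(rr20(prec))  # expected: 2 days with >= 20 mm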
5,351,288
def get_neg_label(cls_label: np.ndarray, num_neg: int) -> np.ndarray: """Generate random negative samples. :param cls_label: Class labels including only positive samples. :param num_neg: Number of negative samples. :return: Label with original positive samples (marked by 1), negative samples (marked by -1), and ignored samples (marked by 0) """ seq_len, num_scales = cls_label.shape cls_label = cls_label.copy().reshape(-1) cls_label[cls_label < 0] = 0 # reset negative samples neg_idx, = np.where(cls_label == 0) np.random.shuffle(neg_idx) neg_idx = neg_idx[:num_neg] cls_label[neg_idx] = -1 cls_label = np.reshape(cls_label, (seq_len, num_scales)) return cls_label
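# Hedged usage sketch for get_neg_label: a 4x2 label grid with two positives; three of
# the remaining six cells are marked as negatives at random.
import numpy as np

cls_label = np.zeros((4, 2), dtype=np.int64)
cls_label[0, 0] = 1
cls_label[2, 1] = 1
labeled = get_neg_label(cls_label, num_neg=3)
print(labeled)
print((labeled == 1).sum(), (labeled == -1).sum())  # 2 positives, 3 negatives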
5,351,289
def G12(x, a): """ Eqs 20, 24, 25 of Khangulyan et al (2014) """ alpha, a, beta, b = a pi26 = np.pi ** 2 / 6.0 G = (pi26 + x) * np.exp(-x) tmp = 1 + b * x ** beta g = 1.0 / (a * x ** alpha / tmp + 1.0) return G * g
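# Hedged call sketch for G12: the parameter tuple (alpha, a, beta, b) below is made up
# purely to show the call signature, not taken from Khangulyan et al. (2014).
import numpy as np

x = np.logspace(-3, 2, 6)
params = (0.5, 1.0, 1.5, 2.0)  # (alpha, a, beta, b) -- illustrative values only
print(G12(x, params))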
5,351,290
def custom_uwg(directory): """Generate UWG json with custom reference BEMDef and SchDef objects.""" # override at 5,2 and add at 18,2 # SchDef default_week = [[0.15] * 24] * 3 schdef1 = SchDef(elec=default_week, gas=default_week, light=default_week, occ=default_week, cool=default_week, heat=default_week, swh=default_week, q_elec=18.9, q_gas=3.2, q_light=18.9, n_occ=0.12, vent=0.0013, v_swh=0.2846, bldtype='largeoffice', builtera='new') default_week = [[0.35] * 24] * 3 schdef2 = SchDef(elec=default_week, gas=default_week, light=default_week, occ=default_week, cool=default_week, heat=default_week, swh=default_week, q_elec=18.9, q_gas=3.2, q_light=18.9, n_occ=0.12, vent=0.0013, v_swh=0.2846, bldtype='customhospital', builtera='new') # BEMDedf # materials insulation = Material(0.049, 836.8 * 265.0, 'insulation') gypsum = Material(0.16, 830.0 * 784.9, 'gypsum') wood = Material(0.11, 1210.0 * 544.62, 'wood') # elements wall = Element(0.22, 0.92, [0.01, 0.01, 0.0127], [wood, insulation, gypsum], 0, 293, False, 'wood_frame_wall') roof = Element(0.22, 0.92, [0.01, 0.01, 0.0127], [wood, insulation, gypsum], 0, 293, True, 'wood_frame_roof') mass = Element(0.2, 0.9, [0.05, 0.05], [ wood, wood], 0, 293, True, 'wood_floor') # building bldg = Building( floor_height=3.0, int_heat_night=1, int_heat_day=1, int_heat_frad=0.1, int_heat_flat=0.1, infil=0.171, vent=0.00045, glazing_ratio=0.4, u_value=3.0, shgc=0.3, condtype='AIR', cop=3, coolcap=41, heateff=0.8, initial_temp=293) bemdef1 = BEMDef(building=bldg, mass=mass, wall=wall, roof=roof, bldtype='largeoffice', builtera='new') bemdef2 = BEMDef(building=bldg, mass=mass, wall=wall, roof=roof, bldtype='customhospital', builtera='new') # vectors ref_sch_vector = [schdef1, schdef2] ref_bem_vector = [bemdef1, bemdef2] bld = [('largeoffice', 'new', 0.4), # overwrite ('hospital', 'new', 0.5), ('customhospital', 'new', 0.1)] # extend model = UWG.from_param_args( epw_path=None, bldheight=10.0, blddensity=0.5, vertohor=0.5, zone='1A', treecover=0.1, grasscover=0.1, bld=bld, ref_bem_vector=ref_bem_vector, ref_sch_vector=ref_sch_vector) dest_file = os.path.join(directory, 'custom_uwg.json') with open(dest_file, 'w') as fp: json.dump(model.to_dict(include_refDOE=True), fp, indent=4)
5,351,291
def update_table_params(switch_inst, table_name, params, find_params=None, row_id=None, validate_updates=True):
    """Configure port parameters in table.

    Args:
        switch_inst(object):  Switch instance to work with
        table_name(str):  The name of table to work with
        params(dict):  Parameters and values that should be configured for port
        find_params(list):  List of parameters to find a row in table
        row_id(int):  Row ID in table
        validate_updates(bool):  Verify if updates were set

    Returns:
        None

    Examples::

        helpers.update_table_params(env.switch[1], "DcbxPfcPortsAdmin", {"willing": "Enabled", "enabled": "0,0,0,1,0,0,0,0"}, [port_id, ])
        helpers.update_table_params(env.switch[1], "DcbxPorts", {"adminStatus": 'Disabled'}, row_id=24)

    """
    if row_id is None and find_params is not None:
        row_id = switch_inst.findprop(table_name, find_params)
    elif row_id is None and find_params is None:
        raise ValueError("Find_params or row_id should be specified to update parameter value in table")
    assert row_id != -1, "Can't find row in table '%s' with find params %s" % (table_name, find_params)

    for field, value in params.items():
        assert switch_inst.setprop(table_name, field, [row_id, value]) == 0, "Value '%s' is not set for field '%s'" % (value, field)

    if validate_updates:
        verify_port_params_in_table(switch_inst, table_name, params, row_id=row_id)
5,351,292
def binomial(n, k): """ binomial coefficient """ if k < 0 or k > n: return 0 if k == 0 or k == n: return 1 num = 1 den = 1 for i in range(1, min(k, n - k) + 1): # take advantage of symmetry num *= (n + 1 - i) den *= i c = num // den return c
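# Quick sanity checks for binomial; pure Python, no dependencies beyond the function above.
assert binomial(5, 2) == 10
assert binomial(10, 0) == 1
assert binomial(4, 7) == 0          # k > n
assert binomial(52, 5) == 2598960   # 5-card poker hands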
5,351,293
def pagination(page):
    """
    Generates the series of links to the pages in a paginated list.
    """
    paginator = page.paginator
    page_num = page.number

    #pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if False:  #not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 3
        ON_ENDS = 2

        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination.
        if paginator.num_pages <= 10:
            page_range = range(1, paginator.num_pages + 1)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                page_range.extend(range(1, ON_ENDS))
                page_range.append(DOT)
                page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
            else:
                page_range.extend(range(1, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS):
                page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                page_range.append(DOT)
                page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages + 1))
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages + 1))

    #need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'paginator': paginator,
        'page_obj': page,
        'page': page.number,
        #'pagination_required': pagination_required,
        #'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        #'ALL_VAR': ALL_VAR,
        '1': 1,
        'is_paginated': True,
    }
5,351,294
def send_apns_push_message():
    """
    A sample showing how to send an APNs push message.
    :return:
    """
    message = messaging.Message(
        apns=apns_push_config,
        # TODO:
        token=['your token']
    )

    try:
        # Case 1: Local CA sample code
        # response = messaging.send_message(message, verify_peer="../Push-CA-Root.pem")

        # Case 2: No verification of HTTPS's certificate
        response = messaging.send_message(message)

        # Case 3: use certifi Library
        # import certifi
        # response = messaging.send_message(message, verify_peer=certifi.where())

        print("response is ", json.dumps(vars(response)))
        assert (response.code == '80000000')
    except Exception as e:
        print(repr(e))
5,351,295
def bubbleSort(arr):
    """
    >>> bubbleSort([54, 23, 90, 34, 12, 25, 11])
    [11, 12, 23, 25, 34, 54, 90]
    """
    n = len(arr)
    for i in range(n-1):
        for j in range(0, n-i-1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
    return arr
5,351,296
def generate_kronik_feats(fn): """Generates features from a Kronik output file""" header = get_tsv_header(fn) return generate_split_tsv_lines(fn, header)
5,351,297
def regressor_contrast(model1:RegressorMixin, model2:RegressorMixin, test_data:pd.DataFrame,
                       label_data:pd.Series, threshold:int=10)->pd.DataFrame:
    """Compute 11 metrics to compare two scikit-learn regression models and run
    statistical tests for residual normality"""
    np.random.seed(33)
    models_time = []
    models_memory = []
    models_predictions = []
    models_acc = []
    models_dagostino = []
    models_dagostino_p = []
    models_explained_variance = []
    models_r2 = []
    models_rmse = []
    models_mae = []
    models_shapiro = []
    models_shapiro_p = []

    for m in [model1, model2]:
        t1 = time()
        predictions = m.predict(test_data)
        t2 = time()
        models_time.append(t2 - t1)
        models_predictions.append(predictions)
        models_explained_variance.append(round(explained_variance_score(label_data, predictions), 5))
        models_r2.append(round(r2_score(label_data, predictions), 5))
        models_rmse.append(round(mean_squared_error(label_data, predictions, squared=False), 5))
        models_mae.append(round(mean_absolute_error(label_data, predictions), 5))
        models_acc.append(round(percentaje_acc(label_data, predictions, threshold=threshold), 5))
        models_memory.append(sys.getsizeof(m) / 1024)
        shap_sta, shap_p, dagostino_sta, dagostino_p = _multiples_normality_test(predictions, label_data)
        models_shapiro.append(round(shap_sta, 5))
        models_dagostino.append(round(dagostino_sta, 5))
        models_shapiro_p.append(shap_p)
        models_dagostino_p.append(dagostino_p)

    table = pd.DataFrame({
        "Model": ["Model1", "Model2"],
        "Exec time(seg)": models_time,
        "Memory (Kb)": models_memory,
        "R2": models_r2,
        "MAE": models_mae,
        "RMSE": models_rmse,
        "Explained Variance": models_explained_variance,
        "Residual Shapiro Test Stat": models_shapiro,
        "Residual Shapiro Test p-value": models_shapiro_p,
        "Residual D’Agostino’s Statistic": models_dagostino,
        "Residual D’Agostino’s p-value": models_dagostino_p,
        "Ratio errors in Threshold": models_acc
    })
    return table
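# Hedged usage sketch for regressor_contrast with two scikit-learn regressors on a toy
# regression split; the helpers percentaje_acc and _multiples_normality_test are assumed
# to be defined in the surrounding module, as the function requires.
import pandas as pd
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=400, n_features=5, noise=10.0, random_state=0)
X = pd.DataFrame(X, columns=[f"f{i}" for i in range(5)])
y = pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model_a = LinearRegression().fit(X_train, y_train)
model_b = RandomForestRegressor(n_estimators=50, random_state=0).fit(X_train, y_train)
print(regressor_contrast(model_a, model_b, X_test, y_test, threshold=10))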
5,351,298
def delete_product(uuid: str, db: Session = Depends(auth)): """Delete a registered product.""" if product := repo.get_product_by_uuid(db=db, uuid=uuid): if product.taken: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Cannot delete products already taken.", ) repo.delete_product(db=db, product=product) return { "deleted": True, "product": product, } raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="No product found for the code specified.", )
5,351,299