content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def wrap(
    module: nn.Module,
    cls: Callable = FullyShardedDataParallel,
    activation_checkpoint: bool = False,
    **wrap_overrides: Any
) -> nn.Module:
    """
    Annotate that a module should be wrapped. Annotated modules will only be
    wrapped if inside of an :func:`enable_wrap` context manager.

    An important use case is annotating large layers that should be sharded
    (in-place) during initialization, to avoid running out of system memory.

    Usage::

        with enable_wrap(**params):
            # Wraps layer in FSDP by default if within context
            self.l1 = wrap(torch.nn.Linear(5, 5))

    Args:
        module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
        cls (Callable): class wrapper to wrap the model with if in context
            (default: :class:`FullyShardedDataParallel`)
        activation_checkpoint (bool): use activation checkpointing wrapper
            (default: False)
        **wrap_overrides: configuration overrides that will take priority over
            the values provided by the :func:`enable_wrap` context
    """
    if ConfigAutoWrap.in_autowrap_context:
        wrap_overrides = {**ConfigAutoWrap.kwargs, **wrap_overrides}
        if activation_checkpoint:
            module = checkpoint_wrapper(module)
        return cls(module, **wrap_overrides)
    return module
5,351,500
def _is_multiple_state(state_size):
    """Check whether the state_size contains multiple states."""
    return (hasattr(state_size, '__len__') and
            not isinstance(state_size, tensor_shape.TensorShape))
5,351,501
def deque_to_yaml(representer, node):
    """Convert collections.deque to YAML"""
    return representer.represent_sequence(
        "!collections.deque", (list(node), node.maxlen))
5,351,502
def parse_webpage(url, page_no):
    """
    Parses the given webpage using 'BeautifulSoup' and returns html content
    of that webpage.
    """
    page = urllib2.urlopen(url + page_no)
    parsed_page = BeautifulSoup(page, 'html.parser')
    return parsed_page
5,351,503
def randomDigits(length=8):
    """
    Generate a random digit string.

        randomDigits() ==> 73048139
    """
    return ''.join([random.choice(digits) for _ in range(length)])
5,351,504
def adaptor_set_all(handle, adaptors=None, server_id=1, **kwargs): """ Example: adaptor_set_all(handle, adaptors=[ {id: 1, lldp: "enabled", fip_mode: "enabled", port_channel_enable: "enabled", vntag_mode: "enabled", admin_action:None} ] ) """ from imcsdk.mometa.adaptor.AdaptorGenProfile import AdaptorGenProfile api = 'adaptor_set_all' api_error_msg = VicConst.ADAPTOR_ERROR_MSG if not adaptors: log.debug("No adapters present for configuration") return # fetch adaptors from end point, adaptor_ep_dict is dict {id, adaptor_mo} adaptor_ep_dict = _prepare_ep_adaptor_dict(handle, api_error_msg) # validate input and checks if adaptor exists at end point for adaptor in adaptors: id = adaptor.get('id', None) if id is None: raise ImcOperationError( api_error_msg, 'Provide adapter slot to configure') if id not in adaptor_ep_dict: raise ImcOperationError( api_error_msg, "Adaptor %s is not present at end point." % id) # configure adapter mos = [] restart_server = None adaptor_list = [] #adaptors are the configured adaptors in intersight AdaptorConfiguration Policy for adaptor in adaptors: id = adaptor['id'] lldp = adaptor.pop('lldp', None) fip_mode = adaptor.pop('fip_mode', None) port_channel_enable = adaptor.pop('port_channel_enable', None) log.debug("Adapter Config Policy - configured Values") log.debug("Port Channel: %s, LLDP Mode: %s, Fip Mode: %s", port_channel_enable, lldp, fip_mode) # vntag_mode = adaptor.pop('vntag_mode', None) # admin_state = adaptor.pop('admin_state', None) mo = adaptor_ep_dict[id] adaptor_properties = adaptor_properties_get(handle, id, server_id=1) adaptor_list.append(adaptor_properties) #port_channel_capable returns None for < Gen4 adapters and False for Gen4+ unsupported portchannel adapters. #Hence a check has to be done for both None and False #for backward compatibility in deploying Adapter Configuration Policy. if adaptor_properties.port_channel_capable == None or adaptor_properties.port_channel_capable == "False": log.debug("Port Channel is not supported for the adapter at slot: %s", adaptor_properties.pci_slot) port_channel_enable = None if adaptor_properties.port_channel_capable == "True" and port_channel_enable == "disabled": log.debug("Port Channel is disabled by user for adapter at slot %s. Server restart initiated", adaptor_properties.pci_slot) restart_server=True mo.admin_state = AdaptorUnitConsts.ADMIN_STATE_ADAPTOR_RESET_DEFAULT if port_channel_enable == "disabled": AdaptorGenProfile(parent_mo_or_dn=mo, lldp=lldp, fip_mode=fip_mode, port_channel_enable="disabled", vntag_mode="disabled") else: #port_channel_enable value is set to enabled by default. # Hence, its not required to send the default value. AdaptorGenProfile(parent_mo_or_dn=mo, lldp=lldp, fip_mode=fip_mode, vntag_mode="disabled") mos.append(mo) response = handle.set_mos(mos) ret = [] if response: ret.append(_process_response(response, api, api_error_msg)) ext_ethif_adaptor_mos = [] for adaptor in adaptors: id = adaptor['id'] ext_ethifs = adaptor.pop('ext_ethifs', None) if ext_ethifs: mo = adaptor_ep_dict[id] ext_ethif_mos = _ext_ethif_set_all(handle, ext_ethifs, mo) ext_ethif_adaptor_mos.extend(ext_ethif_mos) if len(ext_ethif_adaptor_mos) > 0: response = handle.set_mos(ext_ethif_adaptor_mos) if response: error_msg = VicConst.DCE_IF_ERROR_MSG ret.append(_process_response(response, api, error_msg)) results = {} results["changed"] = True results["msg"] = "" results["msg_params"] = ret #Power Cycle Host for the changes to take effect. 
if restart_server: log.debug("Restarting server...") server_power_cycle(handle, timeout=180) _wait_for_power_state(handle, state="on", timeout=60, interval=5, server_id=1) log.debug("Server restarted successfully. Adaptor initialisation check in progress.") for adaptor in adaptor_list: adaptor_initialization_in_progress = True wait_count = 0 while adaptor_initialization_in_progress and wait_count < 5: try: adaptor = _get_mo(handle, dn=adaptor.dn) adaptor_initialization_in_progress = False log.debug("Adaptor at slot %s initialisation complete.", adaptor.pci_slot) except ImcOperationError: log.debug("Adaptor at slot %s initialisation in progress. Sleep for 5s.", adaptor.pci_slot) wait_count += 1 time.sleep(5) if adaptor_initialization_in_progress: log.debug("Adaptor initialisation failure for adaptor at slot %s", adaptor.pci_slot) raise ImcOperationError( api_error_msg, "Adaptor %s is not initialised at end point." % adaptor.pci_slot) log.debug("Sleeping for 1 minute") time.sleep(60) log.debug("Returning results") return results
5,351,505
def some_func(string: str, function: Callable) -> bool:
    """Check if some elements in a string match the function (functional).

    Args:
        string: <str> string to verify.
        function: <callable> function to call.

    Returns:
        True if some, but not all, elements in the sequence are True.

    Examples:
        >>> assert some_func('abcdefg&%$', str.isalpha)
        >>> assert not some_func('&%$=', str.isalpha)
    """
    return any(map(function, string)) and not all(map(function, string))
5,351,506
def UseExceptions(*args):
    """UseExceptions()"""
    return _ogr.UseExceptions(*args)
5,351,507
def get_mlm_logits(input_tensor, albert_config, mlm_positions, output_weights): """From run_pretraining.py.""" input_tensor = gather_indexes(input_tensor, mlm_positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=albert_config.embedding_size, activation=modeling.get_activation(albert_config.hidden_act), kernel_initializer=modeling.create_initializer( albert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[albert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul( input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) return logits
5,351,508
def test_mesh2d_delete_hanging_edges(): """Tests `mesh2d_delete_hanging_edges` by deleting 2 hanging edges in a simple Mesh2d 4* | 3---2---5* | | 0---1 """ mk = MeshKernel() node_x = np.array([0.0, 1.0, 1.0, 0.0, 0.0, 2.0], dtype=np.double) node_y = np.array([0.0, 0.0, 1.0, 1.0, 2.0, 1.0], dtype=np.double) edge_nodes = np.array([0, 1, 1, 2, 2, 3, 3, 0, 3, 4, 2, 5], dtype=np.int32) mesh2d = Mesh2d(node_x, node_y, edge_nodes) mk.mesh2d_set(mesh2d) mk.mesh2d_delete_hanging_edges() mesh2d = mk.mesh2d_get() assert mesh2d.node_x.size == 4 assert mesh2d.edge_x.size == 4 assert mesh2d.face_x.size == 1
5,351,509
def spm_hrf(TR, t1=6, t2=16, d1=1, d2=1, ratio=6, onset=0, kernel=32):
    """Python implementation of spm_hrf.m from the SPM software.

    Parameters
    ----------
    TR : float
        Repetition time at which to generate the HRF (in seconds).
    t1 : float (default=6)
        Delay of response relative to onset (in seconds).
    t2 : float (default=16)
        Delay of undershoot relative to onset (in seconds).
    d1 : float (default=1)
        Dispersion of response.
    d2 : float (default=1)
        Dispersion of undershoot.
    ratio : float (default=6)
        Ratio of response to undershoot.
    onset : float (default=0)
        Onset of hemodynamic response (in seconds).
    kernel : float (default=32)
        Length of kernel (in seconds).

    Returns
    -------
    hrf : array
        Hemodynamic response function

    References
    ----------
    [1] Adapted from the Poldrack lab fMRI tools.
        https://github.com/poldracklab/poldracklab-base/blob/master/fmri/spm_hrf.py
    """
    ## Define metadata.
    fMRI_T = 16.0
    TR = float(TR)

    ## Define times.
    dt = TR / fMRI_T
    u = np.arange(kernel / dt + 1) - onset / dt

    ## Generate (super-sampled) HRF.
    hrf = (gamma(t1 / d1, scale=1.0 / (dt / d1)).pdf(u)
           - gamma(t2 / d2, scale=1.0 / (dt / d2)).pdf(u) / ratio)

    ## Downsample.
    good_pts = np.array(range(int(kernel / TR))) * fMRI_T
    hrf = hrf[good_pts.astype(int)]

    ## Normalize and return.
    hrf = hrf / np.sum(hrf)
    return hrf
5,351,510
def prob_active_neuron(activity_matrix):
    """Get expected co-occurrence under independence assumption.

    Parameters
    ----------
    activity_matrix : np.array
        num_neurons by num_bins, boolean (1 or 0)

    Returns
    -------
    prob_active : np.array
        Fraction of bins each cell participates in individually
    """
    prob_active = np.mean(activity_matrix, axis=1)
    return prob_active
5,351,511
def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor:
    """Equivalent of PIL Translate in X dimension."""
    image = translate(wrap(image), [-pixels, 0])
    return unwrap(image, replace)
5,351,512
def reduce_time_space_seasonal_regional( mv, season=seasonsyr, region=None, vid=None, exclude_axes=[] ): """Reduces the variable mv in all time and space dimensions. Any other dimensions will remain. The averages will be restricted to the the specified season and region. The season should be a cdutil.times.Seasons object. The region may be a string defining a region in defines.py, or it may be a list of four numbers as in defines.py. That is, it would take the form [latmin,latmax,lonmin,lonmax]. """ #if len( set(['time','lat','lon','lev']) & set([ax.id for ax in allAxes(mv)]) )==0: if len( [ax for ax in allAxes(mv) if ax.isTime() or ax.isLatitude() or ax.isLongitude() or ax.isLevel() ] )==0: return mv # nothing to reduce if vid is None: vid = 'reduced_'+mv.id mvreg = select_region(mv, region) axes = allAxes( mvreg ) #axis_names = [ a.id for a in axes if a.id=='lat' or a.id=='lon' or a.id=='lev'] axis_names = [ a.id for a in axes if a.isLatitude() or a.isLongitude() or a.isLevel() and a.id not in exclude_axes] axes_string = '('+')('.join(axis_names)+')' if len(axes_string)>2: for axis in axes: if axis.getBounds() is None and not (axis.isTime() and hasattr(axis,'climatology')): axis._bounds_ = axis.genGenericBounds() mvsav = cdutil.averager( mvreg, axis=axes_string ) mvtsav = calculate_seasonal_climatology(mvsav, season) mvtsav.id = vid #mvtsav = delete_singleton_axis(mvtsav, vid='time') #mvtsav = delete_singleton_axis(mvtsav, vid='lev') #mvtsav = delete_singleton_axis(mvtsav, vid='lat') #mvtsav = delete_singleton_axis(mvtsav, vid='lon') return mvtsav
5,351,513
def get_args():
    """
    Function to retrieve and parse the command line arguments, then to return
    these arguments as an ArgumentParser object.

    Parameters:
        None.
    Returns:
        parser.parse_args(): inputted or default argument objects.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str,
                        default='flowers/test/100/image_07896.jpg',
                        help="path to folder of images")
    parser.add_argument('--checkpoint', type=str,
                        default='save_directory/checkpoint.pth',
                        help='file to load the checkpoint')
    parser.add_argument('--category_names', type=str,
                        default='cat_to_name.json',
                        help='file to map the real names')
    parser.add_argument('--top_k', type=int, default=3,
                        help='top classes predicted to return')
    parser.add_argument('--gpu', action='store_true',
                        help='hyperparameters for GPU')

    return parser.parse_args()
5,351,514
def apply_modifications(model, custom_objects=None):
    """Applies modifications to the model layers to create a new Graph. For example, simply changing
    `model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be
    updated with modified inbound and outbound tensors because of change in layer building function.

    Args:
        model: The `keras.models.Model` instance.

    Returns:
        The modified model with changes applied. Does not mutate the original `model`.
    """
    # The strategy is to save the modified model and load it back. This is done because setting the activation
    # in a Keras layer doesn't actually change the graph. We have to iterate the entire graph and change the
    # layer inbound and outbound nodes with modified tensors. This is doubly complicated in Keras 2.x since
    # multiple inbound and outbound nodes are allowed with the Graph API.
    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        model.save(model_path)
        return load_model(model_path, custom_objects=custom_objects)
    finally:
        os.remove(model_path)
5,351,515
def bioinfo():
    """Main entry point of load commands"""
    pass
5,351,516
def make_ccd_temperature_timeseries_pickle(sectornum): """ convert the engineering data on MAST to a pickled time-series of on-chip CCD temperature. outputs: '/nfs/phtess1/ar1/TESS/FFI/ENGINEERING/sector0001_ccd_temperature_timeseries.pickle' which contains a dictionary, with keys 'S_CAM{:d}_ALCU_sensor_CCD{:d}', each of which contains a dictionary of time and temperature. """ engdatadir = '/nfs/phtess1/ar1/TESS/FFI/ENGINEERING/' if sectornum==1: # identical to the other one on mast. LGB tested this in # explore_ccd_temperature_timeseries fname = os.path.join(engdatadir,'tess2018323111417_sector01-eng.fits') elif sectornum>1: fnames = glob(os.path.join( engdatadir,'tess2*_sector{:s}-eng.fits'. format(str(sectornum).zfill(2)) )) if len(fnames) > 1: raise AssertionError('expected one engineering file per sector') elif len(fnames)==0: raise AssertionError('got no engineering files for this sector!!') fname = fnames[0] else: raise NotImplementedError hdulist = fits.open(fname) ccds, cams = list(range(1,5)), list(range(1,5)) # NOTE: unclear if "COOKED" is actually the temperature... plausible # but unclear. d = {} temperature_hdr_names = ['S_CAM{:d}_ALCU_sensor_CCD{:d}'.format(cam, ccd) for cam in cams for ccd in ccds] for temperature_hdr_name in temperature_hdr_names: this_time = hdulist[temperature_hdr_name].data['TIME'] try: this_temperature = hdulist[temperature_hdr_name].data['COOKED'] except KeyError: # in sectors >=13, the engineering data changed the key from # "COOKED" to "VALUE". this_temperature = hdulist[temperature_hdr_name].data['VALUE'] d[temperature_hdr_name] = {} d[temperature_hdr_name]['time'] = this_time d[temperature_hdr_name]['temperature'] = this_temperature # note: the read out times on each camera are not identical. therefore we # can't make this extremely simple as just temperatures in the 16 readouts, # vs time. it needs to be: for each readout, what is the temperature # time-series? pklpath = os.path.join( engdatadir, 'sector{:s}_ccd_temperature_timeseries.pickle'. format(str(sectornum).zfill(4)) ) with open(pklpath, 'wb') as f: pickle.dump(d, f, pickle.HIGHEST_PROTOCOL) print('{} made {}'.format(datetime.utcnow().isoformat(), pklpath))
5,351,517
def stuur_nieuwe_taak_email(email, aantal_open):
    """ Send a reminder e-mail that a task is waiting. """
    text_body = ("Hallo %s!\n\n" % email.account.get_first_name() +
                 "Er is zojuist een nieuwe taak voor jou aangemaakt op %s\n" % settings.SITE_URL +
                 "Op het moment van sturen stonden er %s taken open.\n\n" % aantal_open +
                 "Bedankt voor je aandacht!\n" +
                 "Het bondsbureau\n")

    mailer_queue_email(email.bevestigde_email,
                       'Er is een nieuwe taak voor jou',
                       text_body)
5,351,518
def search_variations(image, old_image): """ Search, but now for the variation of each tile. """ if is_complete(image): yield image return constrains = calc_constrains(image) position = max(constrains, key=constrains.get) tile = tiles[old_image[position]] possible_variations = tuple(gen_variations(tile)) for direction, neighbor_position, neighbor_tile in gen_neighbors(position, image): if neighbor_tile is None: continue possible_variations = tuple( variation for variation in gen_matching_variations( neighbor_tile, tile, INVERSE_DIRECTIONS[direction] ) if any((variation == other).all() for other in possible_variations) ) for variation in possible_variations: new_image = {**image, position: variation} for solution in search_variations(new_image, old_image): yield solution
5,351,519
def fuel_requirement(mass: int) -> int:
    """Fuel is mass divided by three, rounded down, minus 2."""
    return math.floor(mass / 3) - 2
5,351,520
def test_basic_ops(cinq_test_service): """ Test will pass if: 1. Auditor can detect non-compliant EC2 instances 2. Auditor respect grace period settings """ # Prep cinq_test_service.start_mocking_services('ec2') setup_info = setup_test_aws(cinq_test_service) recipient = setup_info['recipient'] account = setup_info['account'] db_setting = dbconfig.get('audit_scope', NS_AUDITOR_REQUIRED_TAGS) db_setting['enabled'] = ['aws_ec2_instance'] dbconfig.set(NS_AUDITOR_REQUIRED_TAGS, 'audit_scope', DBCJSON(db_setting)) dbconfig.set(NS_AUDITOR_REQUIRED_TAGS, 'collect_only', False) # Add resources client = aws_get_client('ec2') resource = client.run_instances(ImageId='i-10000', MinCount=1, MaxCount=1) # Collect resources collect_resources(account=account, resource_types=['ec2']) # Initialize auditor auditor = MockRequiredTagsAuditor() # Test 1 --- Test if auditor respect grace period settings cinq_test_service.modify_resource( resource['Instances'][0]['InstanceId'], 'launch_date', datetime.datetime.utcnow().isoformat() ) auditor.run() assert auditor._cinq_test_notices == {} # Test 2 --- Test if auditor can pick up non-compliant resources correctly ''' Modify resource property''' assert cinq_test_service.modify_resource( resource['Instances'][0]['InstanceId'], 'launch_date', '2000-01-01T00:00:00' ) is True auditor.run() notices = auditor._cinq_test_notices assert recipient in notices assert notices[recipient]['not_fixed'][0]['resource'].id == resource['Instances'][0]['InstanceId'] # Test 3 --- Test if auditor can terminate ec2 correctly cinq_test_service.modify_issue( auditor._cinq_test_notices[recipient]['not_fixed'][0]['issue'].id, 'created', 0 ) auditor.run() notices = auditor._cinq_test_notices ''' Check if the action is correct''' assert notices[recipient]['not_fixed'][0]['action'] == AuditActions.REMOVE ''' Check if the instance is terminated ''' assert client.describe_instance_status( InstanceIds=[notices[recipient]['not_fixed'][0]['resource'].id] )['InstanceStatuses'][0]['InstanceState']['Name'] == 'terminated'
5,351,521
def data_head(fname):
    """ Get the column names of the csv

    Parameters
    ----------
    fname: str
        Filename of the csv-data

    Returns
    ----------
    str-list: header-names of the csv-data
    """
    return pd.read_csv(fname, encoding='ISO-8859-1').columns
5,351,522
def print_count(description, count, total_count, indent=''):
    """
    Prints the results for a single count, including its percent.

    Arguments:
        description (string): A description of the count.
        count (int): The count to be printed.
        total_count (int): Total count used to calculate percentage.
        indent (string): A prefix string that can be used for the output.
    """
    print "{indent}{description}: {count} ({percent}%)".format(
        indent=indent,
        description=description,
        count=count,
        percent=(100 * count / total_count),
    )
5,351,523
def test_linked_list_push_adds_new_item():
    """Linked List push method should add a new item to the list."""
    from linked_list import LinkedList
    l = LinkedList()
    l.push('val')
    assert l.head.value == 'val'
5,351,524
def format(serverDict, sortKeyword='id'): """ Returns an array of nicely formatted servers, sorted by whatever the user prefers, or id by default. """ sortDict = {'id': lambda server: int(server.name[4:-3]), 'uptime': lambda server: server.uptime} sortFunction = sortDict[sortKeyword] class Server: def __init__(self, serverName, dataSet): self.name = str(serverName) self.loadAvgs = dataSet[serverName]['load_avgs'] self.users = dataSet[serverName]['users'] self.uptime = dataSet[serverName]['uptime'] def __str__(self): return str(self.name[:-3]) + " (" + str(self.loadAvgs[1] * 100) + "% mean CPU load, " + str(len(self.users)) + " users online, up for " + cleanTime(self.uptime) + ")" serverList = [] for server in serverDict: serverList.append(Server(server, serverDict)) # Now, sort the list based on the sorting function serverList.sort(key=sortFunction) return serverList
5,351,525
def moray_script():
    """Generate the js module that exposes the JavaScript functions.

    Returns:
        The js module that exposes the JavaScript functions.
    """
    return bottle.static_file('moray.js', root=_root_static_module)
5,351,526
def load_compdat(wells, buffer, meta, **kwargs): """Load COMPDAT table.""" _ = kwargs dates = meta['DATES'] columns = ['DATE', 'WELL', 'I', 'J', 'K1', 'K2', 'MODE', 'Sat', 'CF', 'DIAM', 'KH', 'SKIN', 'ND', 'DIR', 'Ro'] df = pd.DataFrame(columns=columns) for line in buffer: if '/' not in line: break line = line.split('/')[0].strip() if not line: break vals = line.split() full = [None] * len(columns) full[0] = dates[-1] if not dates.empty else pd.to_datetime('') shift = 1 for i, v in enumerate(vals): if '*' in v: shift += int(v.strip('*')) - 1 else: full[i+shift] = v df = df.append(dict(zip(columns, full)), ignore_index=True) df[['WELL', 'MODE', 'DIR']] = df[['WELL', 'MODE', 'DIR']].applymap( lambda x: x.strip('\'\"') if x is not None else x) df[['I', 'J', 'K1', 'K2']] = df[['I', 'J', 'K1', 'K2']].astype(int) df[['Sat', 'CF', 'DIAM', 'KH', 'Ro']] = df[['Sat', 'CF', 'DIAM', 'KH', 'Ro']].astype(float) for k, v in DEFAULTS.items(): if k in df: df[k] = df[k].fillna(v) if not df.empty: welldata = {k: {'COMPDAT': v.reset_index(drop=True)} for k, v in df.groupby('WELL')} wells.update(welldata, mode='a', ignore_index=True) return wells
5,351,527
def scale_random(a: float, b: float, loc: Optional[float] = None,
                 scale: Optional[float] = None) -> float:
    """Returns a value from a standard normal truncated to [a, b] with mean loc
    and standard deviation scale."""
    return _default.scale_random(a, b, loc=loc, scale=scale)
5,351,528
def created_link(dotfile: ResolvedDotfile) -> str: """An output line for a newly-created link. """ return ( co.BOLD + co.BRGREEN + OK + " " + ln(dotfile.installed.disp, dotfile.link_dest) + co.RESET )
5,351,529
def instruction2_task(scr): """ Description of task 1 """ scr.draw_text(text = "Great Work!! "+ "\n\nNow comes your TASK 3: **Consider an image**."+ "\n\nIf you press the spacebar now, an image will "+ "appear at the bottom of the screen. You can use the information from the"+ " image to make any modifications to the translation of the sentence."+ "\n\n***However in certain cases, the image is not related to the sentence "+ "or not present at all.***"+ "\n\nAfter looking at the image, say loudly if you'd like to modify your translation"+ " by saying "+ "\"I'd like to modify my translation.\" or \"I'd keep the same translation\""+ "\nif you would like to stick with your translation."+ "\n\nThe final TASK 4 is to **Say the translation again (modified or not)**."+ "\nPlease press the spacebar to indicate the start of your new translation.\nYou can stop your"+ " recording by pressing the spacebar and moving to the next sentence.", fontsize = 25) return scr
5,351,530
def tail(f, lines=10, _buffer=4098): """Tail a file and get X lines from the end""" # place holder for the lines found lines_found = [] # block counter will be multiplied by buffer # to get the block size from the end block_counter = -1 # loop until we find X lines while len(lines_found) < lines: try: f.seek(block_counter * _buffer, os.SEEK_END) except IOError: # either file is too small, or too many lines requested f.seek(0) lines_found = f.readlines() break lines_found = f.readlines() # we found enough lines, get out # Removed this line because it was redundant the while will catch # it, I left it for history # if len(lines_found) > lines: # break # decrement the block counter to get the # next X bytes block_counter -= 1 return lines_found[-lines:]
5,351,531
def loggedin_and_owner_required(func): """ Decorator that applies to functions expecting the "owner" name as a second argument. It will check that the visitor is also considered as the owner of the resource it is accessing. Note: automatically calls login_required and check_and_set_owner decorators. """ # TODO when not logged in send a 401 authentication requested and # implement corresponding template (at least send a 401 for non-GET # requests !) @login_required(login_url=settings.LOGIN_URL) @check_and_set_owner def _loggedin_and_owner_required(request, owner_name, *args, **kwargs): if request.user != request.owner_user: return HttpResponseForbidden() else: return func(request, owner_name, *args, **kwargs) return _loggedin_and_owner_required
5,351,532
def recursive_subs(e: sp.Basic, replacements: list[tuple[sp.Symbol, sp.Basic]]) -> sp.Basic: """ Substitute the expressions in ``replacements`` recursively. This might not be necessary in all cases, Sympy's builtin ``subs()`` method should also do this recursively. .. note:: The order of the tuples in ``replacements`` might matter, make sure to order these sensibly in case the expression contains a lot of nested substitutions. Parameters ---------- e : sp.Basic Input expression replacements : list[tuple[sp.Symbol, sp.Basic]] List of replacements: ``symbol, replace`` Returns ------- sp.Basic Substituted expression """ for _ in range(0, len(replacements) + 1): new_e = e.subs(replacements) if new_e == e: return new_e else: e = new_e return new_e
5,351,533
def plot_pp_lab4(): """ 0.000909536 0.000863872 0.000859136 0.000866720 0.000920096 """ times = { "Block = 512, 450 000 эл.": [0.001181888], "Block = 256, 450 000 эл.": [0.001197120], "Block = 512, 900 000 эл.": [0.002302080], "Block = 256, 900 000 эл.": [0.002325952], } block_size = [1024, 512, 256, 128, 64] ax1: plt.Axes _, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(12, 8)) for item in times.keys(): ax1.bar(item, times[item], width=0.8, label=item) #ax1.plot(block_size, times[item], label=item) ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), shadow=True, ncol=2) ax1.yaxis.set_major_locator(ticker.MultipleLocator(0.001)) ax1.yaxis.set_minor_locator(ticker.MultipleLocator(0.0001)) # Добавляем линии основной сетки: ax1.yaxis.set_visible(True) #ax1.xaxis.set_visible(False) ax1.grid(which='major', color='k') ax1.minorticks_on() ax1.grid(which='minor', color='gray',linestyle=':') #ax1.set_ylim(ymin=0, ymax=0.001) ax1.set_title("Параллельные алгоритмы поиска суммы векторов") #ax1.set_xlabel("Размер блока, шт") ax1.set_ylabel("Среднее время работы, с") plt.rc('legend', fontsize=22) # legend fontsize plt.rc('figure', titlesize=48) # fontsize of the figure title plt.show()
5,351,534
def visualize_dataset_nd(X, ys, grid_shape=(2,2), alpha=0.5, xlim=None, ylim=None, loc='upper left', bbox_to_anchor=(1.04,1), figsize=(16, 8), unique_ys=None, save_path=None, label_text_lookup=None): """ Args: X: 2d np.array ys: 1d n.array """ import matplotlib.pyplot as plt from matplotlib import gridspec # To avoid type 3 fonts. ACM Digital library complain about this # based on the recomendations here http://phyletica.org/matplotlib-fonts/ plt.rcParams['pdf.fonttype'] = 42 plt.rcParams['ps.fonttype'] = 42 if unique_ys is not None: c_lookup = gen_color_map(unique_ys) else: c_lookup = gen_color_map(set(ys)) fig = plt.figure(figsize=figsize) gs = gridspec.GridSpec(grid_shape[0], grid_shape[1]) n_dim = X.shape[1] dim_1 = 0 dim_2 = 1 for i in range(grid_shape[0]): for j in range(grid_shape[1]): ax = fig.add_subplot(gs[i, j]) for label in set(ys): color = c_lookup[label] mask = ys == label ax.scatter(X[mask, dim_1], X[mask, dim_2], c=color, label=label if label_text_lookup is None else label_text_lookup[label], alpha=alpha) ax.set_xlabel('Z{0}'.format(dim_1)) ax.set_ylabel('Z{0}'.format(dim_2)) ax.grid(True) if xlim: ax.set_xlim(xlim) if ylim: ax.set_ylim(ylim) dim_2 += 1 if dim_2 == n_dim: dim_1 += 1 dim_2 = dim_1 + 1 plt.tight_layout() if save_path: plt.savefig(save_path) plt.show()
5,351,535
def wait( ctx: click.core.Context, cluster_id: str, superuser_username: str, superuser_password: str, transport: Transport, skip_http_checks: bool, enable_spinner: bool, ) -> None: """ Wait for DC/OS to start. """ check_cluster_id_exists( new_cluster_id=cluster_id, existing_cluster_ids=existing_cluster_ids(), ) cluster_containers = ClusterContainers( cluster_id=cluster_id, transport=transport, ) http_checks = not skip_http_checks doctor_command_name = command_path(sibling_ctx=ctx, command=doctor) wait_for_dcos( cluster=cluster_containers.cluster, superuser_username=superuser_username, superuser_password=superuser_password, http_checks=http_checks, doctor_command_name=doctor_command_name, enable_spinner=enable_spinner, )
5,351,536
def create_lag_i(df,time_col,colnames,lag): """ the table should be index by i,year """ # prepare names if lag>0: s = "_l" + str(lag) else: s = "_f" + str(-lag) values = [n + s for n in colnames] rename = dict(zip(colnames, values)) # create lags dlag = df.reset_index() \ .assign(t=lambda d: d[time_col] + lag) \ .rename(columns=rename)[['i',time_col] + values] \ .set_index(['i',time_col]) # join and return return(df.join(dlag))
5,351,537
def crop_image(image_array, point, size):
    """ Crop the image to the assigned size

    image_array: numpy array of image
    point: (height, width) offset of the crop's top-left corner
    size: desirable cropped size

    return -> cropped image array
    """
    img_height, img_width = point  # assigned location in crop
    # for color image
    if len(image_array.shape) == 3:
        image_array = image_array[:, img_height:img_height + size[0],
                                  img_width:img_width + size[1]]
    # for gray image
    elif len(image_array.shape) == 2:
        image_array = image_array[img_height:img_height + size[0],
                                  img_width:img_width + size[1]]
    return image_array
5,351,538
def deg_to_rad(deg):
    """Convert degrees to radians."""
    return deg * pi / 180.0
5,351,539
def find_plane_normal(points): """ d - number of dimensions n - number of points :param points: `d x n` array of points :return: normal vector of the best-fit plane through the points """ mean = np.mean(points, axis=1) zero_centre = (points.T - mean.T).T U, s, VT = np.linalg.svd(zero_centre) normal = U[:, -1] return normal
5,351,540
def binary_search(data, target, low, high):
    """Return True if target is found in indicated portion of a Python list.

    The search only considers the portion from data[low] to data[high] inclusive.
    """
    if low > high:
        return False                      # interval is empty; no match
    else:
        mid = (low + high) // 2
        if target == data[mid]:           # found a match
            return True
        elif target < data[mid]:
            # recur on the portion left of the middle
            return binary_search(data, target, low, mid - 1)
        else:
            # recur on the portion right of the middle
            return binary_search(data, target, mid + 1, high)
5,351,541
def ReadUnifiedTreeandHaloCatalog(fname, desiredfields=[], icombinedfile=1,iverbose=1): """ Read Unified Tree and halo catalog from HDF file with base filename fname. Parameters ---------- Returns ------- """ if (icombinedfile): hdffile=h5py.File(fname,'r') #load data sets containing number of snaps headergrpname="Header/" numsnaps=hdffile[headergrpname].attrs["NSnaps"] #allocate memory halodata=[dict() for i in range(numsnaps)] numhalos=[0 for i in range(numsnaps)] atime=[0 for i in range(numsnaps)] tree=[[] for i in range(numsnaps)] cosmodata=dict() unitdata=dict() #load cosmology data cosmogrpname="Cosmology/" fieldnames=[str(n) for n in hdffile[headergrpname+cosmogrpname].attrs.keys()] for fieldname in fieldnames: cosmodata[fieldname]=hdffile[headergrpname+cosmogrpname].attrs[fieldname] #load unit data unitgrpname="Units/" fieldnames=[str(n) for n in hdffile[headergrpname+unitgrpname].attrs.keys()] for fieldname in fieldnames: unitdata[fieldname]=hdffile[headergrpname+unitgrpname].attrs[fieldname] #for each snap load the appropriate group start=time.clock() for i in range(numsnaps): snapgrpname="Snap_%03d/"%(numsnaps-1-i) if (iverbose==1): print("Reading ",snapgrpname) isnap=hdffile[snapgrpname].attrs["Snapnum"] atime[isnap]=hdffile[snapgrpname].attrs["scalefactor"] numhalos[isnap]=hdffile[snapgrpname].attrs["NHalos"] if (len(desiredfields)>0): fieldnames=desiredfields else: fieldnames=[str(n) for n in hdffile[snapgrpname].keys()] for catvalue in fieldnames: halodata[isnap][catvalue]=np.array(hdffile[snapgrpname+catvalue]) hdffile.close() print("read halo data ",time.clock()-start) else : hdffile=h5py.File(fname+".snap_000.hdf.data",'r') numsnaps=int(hdffile["NSnaps"][0]) #get field names fieldnames=[str(n) for n in hdffile.keys()] #clean of header info fieldnames.remove("Snapnum") fieldnames.remove("NSnaps") fieldnames.remove("NHalos") fieldnames.remove("TotalNHalos") fieldnames.remove("scalefactor") if (len(desiredfields)>0): fieldnames=desiredfields hdffile.close() halodata=[[] for i in range(numsnaps)] numhalos=[0 for i in range(numsnaps)] atime=[0 for i in range(numsnaps)] tree=[[] for i in range(numsnaps)] start=time.clock() for i in range(numsnaps): hdffile=h5py.File(fname+".snap_%03d.hdf.data"%(numsnaps-1-i),'r') atime[i]=(hdffile["scalefactor"])[0] numhalos[i]=(hdffile["NHalos"])[0] halodata[i]=dict() for catvalue in fieldnames: halodata[i][catvalue]=np.array(hdffile[catvalue]) hdffile.close() print("read halo data ",time.clock()-start) #lets ignore the tree file for now for i in range(numsnaps): tree[i]=dict() return atime,tree,numhalos,halodata,cosmodata,unitdata if (icombinedfile==1): hdffile=h5py.File(fname+".tree.hdf.data",'r') treefields=["haloID", "Num_progen"] #do be completed for Progenitor list although information is contained in the halo catalog by searching for things with the same head #treefields=["haloID", "Num_progen", "Progen"] for i in range(numsnaps): snapgrpname="Snap_%03d/"%(numsnaps-1-i) tree[i]=dict() for catvalue in treefields: """ if (catvalue==treefields[-1]): tree[i][catvalue]=[[]for j in range(numhalos[i])] for j in range(numhalos[i]): halogrpname=snapgrpname+"/Halo"+str(j) tree[i][catvalue]=np.array(hdffile[halogrpname+catvalue]) else: tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue]) """ tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue]) hdffile.close() return atime,tree,numhalos,halodata,cosmodata,unitdata
5,351,542
def save_as_txt(file_path:str, obj:Union[str, list[str]]) -> None: """Save an object in a txt-file if obj (str) -> line[0] = obj (list) -> line[i] = obj[i] (item_i) Args: file_path (str): saving path obj (Any): object to save """ file_path= glob_utils.file.utils.append_extension(file_path, glob_utils.file.utils.FileExt.txt) lines = [] if isinstance(obj,str): lines.append(obj) elif isinstance(obj, list): lines.extend(f'{item}' for item in obj) else: return with open(file_path, 'w') as file: [ file.write(f'{line}\n') for line in lines ] glob_utils.file.utils.logging_file_saved(file_path)
5,351,543
def lines_in_pull(pull): """Return a line count for the pull request. To consider both added and deleted, we add them together, but discount the deleted count, on the theory that adding a line is harder than deleting a line (*waves hands very broadly*). """ ignore = r"(/vendor/)|(conf/locale)|(static/fonts)|(test/data/uploads)" lines = 0 files = pull.get_files() for f in files: if re.search(ignore, f.filename): #print("Ignoring file {}".format(f.filename)) continue lines += f.additions + f.deletions//5 if pull.combinedstate == "merged" and lines > 2000: print("*** Large pull: {lines:-6d} lines, {pr.created_at} {pr.number:-4d}: {pr.title}".format(lines=lines, pr=pull)) return lines
5,351,544
def plot_panels(volume, panels, figsize=(16, 9), save_name=None): """Plot on the same figure a number of views, as defined by a list of panel Parameters ---------- volume : cortex.Volume The data to plot. panels : list of dict List of parameters for each panel. An example of panel is: { 'extent': [0.000, 0.000, 0.300, 0.300], 'view': { 'hemisphere': 'left', 'angle': 'lateral_pivot', 'surface': 'inflated', } } The `extent` and `zoom` entries are ordered as [left, bottom, width, height] with values in [0, 1]. figsize : tuple of float Size of the figure. save_name : str or None Name of the file where the figure is saved. None to not save. Can end with different extensions, such as '.png' of '.pdf'. Returns ------- fig : matplotlib.Figure Created figure. Can be used for instance for custom save functions. Example ------- >>> from cortex.export import plot_panels, params_flatmap_lateral_medial >>> plot_panels(volume, **params_flatmap_lateral_medial) """ # list of couple of angles and surfaces angles_and_surfaces = [(panel['view']['angle'], panel['view']['surface']) for panel in panels] # remove redundant couples, e.g. left and right angles_and_surfaces = list(set(angles_and_surfaces)) list_angles, list_surfaces = list(zip(*angles_and_surfaces)) # create all images temp_dir = tempfile.mkdtemp() base_name = os.path.join(temp_dir, 'fig') filenames = save_3d_views(volume, base_name, list_angles=list_angles, list_surfaces=list_surfaces, trim=True, size=(1600 * 4, 900 * 4)) fig = plt.figure(figsize=figsize) for panel in panels: # load image angle_and_surface = (panel['view']['angle'], panel['view']['surface']) index = angles_and_surfaces.index(angle_and_surface) image = plt.imread(filenames[index]) # chose hemisphere if 'hemisphere' in panel['view']: left, right = np.split(image, [image.shape[1] // 2], axis=1) if panel['view']['hemisphere'] == 'left': image = left else: image = right # trim white borders image = image[image.sum(axis=1).sum(axis=1) > 0] image = image[:, image.sum(axis=0).sum(axis=1) > 0] # zoom if 'zoom' in panel['view']: left, bottom, width, height = panel['view']['zoom'] left = int(left * image.shape[1]) width = int(width * image.shape[1]) bottom = int(bottom * image.shape[0]) height = int(height * image.shape[0]) image = image[bottom:bottom + height] image = image[:, left:left + width] # add ax and image ax = plt.axes(panel['extent']) ax.axis('off') ax.imshow(image) # note that you might get a slightly different layout with `plt.show()` # since it might use a different backend if save_name is not None: fig.savefig(save_name, bbox_inches='tight', dpi=100) # delete temporary directory try: shutil.rmtree(temp_dir) except OSError as e: # reraise if the directory has not already been deleted if e.errno != errno.ENOENT: raise return fig
5,351,545
def get_active_milestones(session, project):
    """Returns the list of all the active milestones for a given project."""
    query = (
        session.query(model.Issue.milestone)
        .filter(model.Issue.project_id == project.id)
        .filter(model.Issue.status == "Open")
        .filter(model.Issue.milestone.isnot(None))
    )
    return sorted([item[0] for item in query.distinct()])
5,351,546
def set_camera_parameters(cfg):
    """ Set camera parameters.

    All values come from the dict generated from the JSON file.

    :param cfg: JSON instance.
    :type cfg: dict
    :return: configured camera
    :rtype: PiCamera
    """
    # set camera resolution [width x height]
    camera = PiCamera()
    camera.resolution = cfg["stream"]["resolution"]
    # set camera frame rate [Hz]
    camera.framerate = cfg["stream"]["framerate"]
    # exposure mode
    camera.exposure_mode = cfg["exposure"]["mode"]
    if cfg["exposure"]["set_iso"]:
        camera.iso = cfg["exposure"]["iso"]
    return camera
5,351,547
def get_srl_result_for_instance(srl_dict, instance): """Get SRL output for an instance.""" sent_id = instance.sent_id tokens_gold = instance.tokens srl_output = srl_dict[sent_id] srl_output["words"] = [word for word in srl_output["words"] if word != "\\"] tokens_srl = srl_output['words'] if tokens_srl != tokens_gold: srl2gold_id_map = get_gold_map(tokens_srl, tokens_gold) else: srl2gold_id_map = {i: i for i in range(len(tokens_srl))} return srl_output, srl2gold_id_map
5,351,548
def ensure_tty(file=sys.stdout): """ Ensure a file object is a tty. It must have an `isatty` method that returns True. TypeError is raised if the method doesn't exist, or returns False. """ isatty = getattr(file, 'isatty', None) if isatty is None: raise TypeError( 'Cannot detect tty, file has no `isatty` method: {}'.format( getattr(file, 'name', type(file).__name__) ) ) if not isatty(): raise TypeError( 'This will not work, file object is not a tty: {}'.format( getattr(file, 'name', type(file).__name__) ) ) return True
5,351,549
def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]: """Break text in to equal (cell) length strings.""" _get_character_cell_size = get_character_cell_size characters = [ (character, _get_character_cell_size(character)) for character in text ][::-1] total_size = position lines: List[List[str]] = [[]] append = lines[-1].append pop = characters.pop while characters: character, size = pop() if total_size + size > max_size: lines.append([character]) append = lines[-1].append total_size = size else: total_size += size append(character) return ["".join(line) for line in lines]
5,351,550
def read_input_field_lonlat( input_file, fld_name, level, conf_in, *, conv_fact=None, crop=0, ): """Read from file and pre-process a field. Returns the field as a Field2D object. Arguments: - input_file: Input netCDF file. - fld_name: Name of the input field used in the input file. - conf_in: Input configuration. Optional arguments: - conv_fact: Conversion factor applied to the field. - crop: cut N pixels off around the domain """ lon, lat = read_lonlat2d( infile=conf_in["infile_lonlat"], name_lon=conf_in["lonlat_names"][0], name_lat=conf_in["lonlat_names"][1], transpose2d=conf_in["infield_transpose"], reduce_grid_res=conf_in["reduce_grid_resolution"], reduce_grid_stride=conf_in["reduce_grid_stride"], ) # Read the raw field from file try: with nc4.Dataset(input_file, "r") as fi: # Strip leading time dimension fld_raw = fi[fld_name][0].astype(np.float32) except Exception as e: err = "Cannot read '{}' from {}\n{}: {}".format( fld_name, input_file, e.__class__.__name__, str(e).strip() ) raise IOError(err) if conf_in["infield_transpose"]: fld_raw = fld_raw.T # SR_TMP < assert lon.shape == fld_raw.shape # SR_TMP > # Shrink domain if crop is not None and crop > 0: fld_raw = fld_raw[crop:-crop, crop:-crop] lon = lon[crop:-crop, crop:-crop] lat = lat[crop:-crop, crop:-crop] # Select level if level is not None: fld_raw = fld_raw[level, :, :] # Apply a conversion factor if conv_fact is not None: fld_raw *= conv_fact # Create a Field2D object fld = Field2D(fld_raw, lon, lat) return fld
5,351,551
def multiply_scenarios(sep, *args):
    """
    Create the cross product of two lists of scenarios
    """
    result = None
    for scenes in args:
        if result is None:
            result = scenes
        else:
            total = []
            for scena in result:
                for scenb in scenes:
                    # Create a merged scenario with a concatenated name
                    name = scena[0] + sep + scenb[0]
                    tdict = {}
                    tdict.update(scena[1])
                    tdict.update(scenb[1])

                    # If there is a 'P' value, it represents the
                    # probability that we want to use this scenario
                    # If both scenarios list a probability, multiply them.
                    if 'P' in scena[1] and 'P' in scenb[1]:
                        P = scena[1]['P'] * scenb[1]['P']
                        tdict['P'] = P

                    total.append((name, tdict))
            result = total

    return check_scenarios(result)
5,351,552
def export_output(): """ Returns a function that will return the contents of the first file in a zip file which is not named '_metadata.csv' """ def fn(export: FlexibleDataExport): out = BytesIO() export.file_format = FileFormat.ZIP_CSV export.write_data(out) with ZipFile(out, 'r') as zipfile: names = [name for name in zipfile.namelist() if name != '_metadata.csv'] with zipfile.open(names[0], 'r') as infile: return infile.read().decode('utf-8') yield fn
5,351,553
def _fill_in_database():
    """Fill the 'foodlik' database with data."""
    datas_path = Path().resolve() / "core" / "back" / "requests"
    datas_path = datas_path / "datas"
    _fill_in_categories(datas_path)
    _fill_in_products(datas_path)
    _fill_in_products_number(datas_path)
5,351,554
def compute_total_probability_vector(mix_coeff_matrix, kernel_probability_matrix): """ Computes the total, weighted probability vector using the mixture coefficient matrix and the kernel probability matrix. """ # Start writing code here. The computation for the total probability vector can be # written in one line! total_probability_vector=K.sum(mix_coeff_matrix*kernel_probability_matrix,axis=1, keepdims=True) # Return statement here. return total_probability_vector
5,351,555
def test_extract_seq_with_skip_before_and_after_ctg_clv(): """ AA GT┘ <-bridge read G--AGT-GC <-bridge contig 0 123 456 <-contig coord ctg_clv^ ^ice ...GACAGTTGC... <-genome 5678901234 <-genome coord 1 | ref_clv^ ^ire """ ctg = MagicMock() ctg.reference_name = 'chr2' ctg.query_sequence = 'GAGTGC' ctg.cigartuples = ( (S.BAM_CMATCH, 1), (S.BAM_CREF_SKIP, 2), (S.BAM_CMATCH, 3), (S.BAM_CREF_SKIP, 1), (S.BAM_CMATCH, 2), ) ref_fa = MagicMock() ref_fa.get_reference_length.return_value = 100 ref_fa.fetch.return_value = 'AC' kw = dict(contig=ctg, strand='+', ref_clv=10, ref_fa=ref_fa, ctg_clv=3) assert extract_seq(**kw) == 'GACAGT' ref_fa.fetch.assert_called_once_with('chr2', 6, 8)
5,351,556
def mnist_noniid(dataset, num_users): """ Sample non-I.I.D client data from MNIST dataset :param dataset: :param num_users: :return: """ num_shards, num_imgs = 200, 300 idx_shard = [i for i in range(num_shards)] dict_users = {i: np.array([], dtype='int64') for i in range(num_users)} idxs = np.arange(num_shards * num_imgs) labels = dataset.train_labels.numpy() # sort labels idxs_labels = np.vstack((idxs, labels)) idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()] idxs = idxs_labels[0, :] # divide and assign for i in range(num_users): rand_set = set(np.random.choice(idx_shard, 2, replace=False)) idx_shard = list(set(idx_shard) - rand_set) for rand in rand_set: dict_users[i] = np.concatenate((dict_users[i], idxs[rand * num_imgs:(rand + 1) * num_imgs]), axis=0) return dict_users
5,351,557
def test_tls_fingerprint_cascade(mgr, temp_chains, cleanup): """Verify that when the first cert-to-name entry doesn't match, the next one is tried""" do_cert_test( mgr, temp_chains, client_ca_certs=[SERVER_INTR, SERVER_CA, CLIENT_INTR, CLIENT_CA], server_trusted_client_certs=[ { "^ks:name": "TrustedClientRootCA", "ks:certificate": read_pem_b64(CLIENT_CA), }, { "^ks:name": "TrustedClientIntermediateCA", "ks:certificate": read_pem_b64(CLIENT_INTR), }, { "^ks:name": "TrustedClientLeaf", "ks:certificate": read_pem_b64(CLIENT_LEAF), }, ], cert_to_names={ "ncs:cert-to-name": [ { "^ncs:id": "1", "ncs:fingerprint": "04" + 8 * ":DE:AD:BE:EF", "ncs:map-type": "x509c2n:specified", "ncs:name": "not-exist", }, { "^ncs:id": "2", "ncs:fingerprint": cert_fingerprint(CLIENT_CA), "ncs:map-type": "x509c2n:specified", "ncs:name": "root", }, ] }, )
5,351,558
def first_order_model(nt, rates): """ Returns the first-order model asymptotic solution for a network nt. Takes a list of interaction weigths (in the same order as the list of nodes) as the "rates" argument """ if type(nt) == list: nt = az.transform(nt) M = network_matrix(nt, rates=rates) elif type(nt) == np.ndarray: M = nt nt = None else: M = network_matrix(nt, rates=rates) L, V = np.linalg.eig(M) kmax = np.real(L).argmax() return (np.real(V[:,kmax])/np.real(V[:,kmax]).sum(), az.transform(nt))
5,351,559
def save_to_disk(url, save_path):
    """ Saves to disk non-destructively (xb option will not overwrite) """
    print('Downloading: %s' % url)
    r = requests.get(url)
    if r.status_code == 404:
        print('URL broken, unable to download: %s' % url)
        return False
    else:
        with open(save_path, 'xb') as f:
            f.write(r.content)
        return True
5,351,560
def test_decision_tree():
    """
    This is a test for decision tree on college dataset
    The test simply checks for whether a ML model has been fit successfully
    This test will be helpful when we switch to API for dataset and dynamically select features
    """
    model = fkm.fit_decision_tree(True)
    assert("<class 'sklearn.tree.tree.DecisionTreeClassifier'>" in str(type(model)))
5,351,561
def arguments(): """Parse arguments. Returns ------- argparse.Namespace Returns Argparse Namespace. """ parser = argparse.ArgumentParser(prog='pyslackdesc', description="pyslackdesc - simple, \ interactive script to generate \ Slack-desc files", epilog="Have fun!") parser.add_argument("-i", "--interactive", default=False, help="run script in interactive mode", action="store_true") parser.add_argument("-o", "--output", default='slack-desc', metavar='filename', help="output file (default is slack-desc)") parser.add_argument("-v", "--verbose", help="show generated file", action="store_true", default=False) parser.add_argument("-V", "--version", action='version', version='%(prog)s ' '{version}'.format(version=__version__)) # Add group cmd_parser = parser.add_argument_group('commandline mode') cmd_parser.add_argument("-n", "--name", nargs=1, metavar='name', type=str, help="program name (single word)") cmd_parser.add_argument("-s", "--short", nargs='+', metavar='"short description"', type=str, help="program short description (one line)") cmd_parser.add_argument("-d", "--description", nargs='+', metavar='"long description"', help="program long description (up to 6 lines)") cmd_parser.add_argument("-u", "--url", nargs=1, metavar='url', help="program URL") args = parser.parse_args() return args
5,351,562
def render_settings_window(s_called, s_int, ntfc_called, ntfc_state, s_state): """ Render the settings window """ win = Settings(s_called, s_int, ntfc_called, ntfc_state, s_state) win.connect("delete-event", Gtk.main_quit) win.show_all() Gtk.main() return win.settings_called, win.interval, win.notifications_called, win.notifications_state
5,351,563
def run(): """ Initialize and runs the app (blocks until the GUI is destroyed) :return: """ dump_configuration() state = AppState() controller = AppController(state) AppController.copy_to_clipboard('hello') render_main_view(controller=controller)
5,351,564
def server(server_id): """ Returns a list of sourcemod servers """ data = {} db_server = ServerModel.select().join(IPModel) db_server = db_server.where(ServerModel.id == server_id).get() server_address = (db_server.ip.address, db_server.port) info = {} try: querier = ServerQuerier(server_address, 1) info = querier.get_info() except NoResponseError: pass players = [] try: players = querier.get_players()["players"] except BrokenMessageError: pass except NoResponseError: pass data["id"] = db_server.id for key in info: data[key] = str(info[key]) data["players"] = [] for player in players: player_data = {} for key in player: if type(player[key]) == str: player_data[key] = player[key].encode('utf8') continue player_data[key] = player[key] data["players"].append(player_data) return json.dumps(data)
5,351,565
def atom_hsoc(case, soc): """ Return atomic spin-orbit coupling matrix :math:`\\vec{l}\cdot\\vec{s}` in complex spherical harmonics basis. Parameters ---------- case : str String label indicating atomic shell, - 'p': for :math:`p` -shell. - 't2g': for :math:`t_{2g}` -shell. - 'd': for :math:`d` -shell. - 'f': for :math:`f` -shell. soc : float The strength of spin-orbit coupling. Returns ------- hsoc : 2d complex array The spin-orbit coupling matrix. """ sqrt2 = np.sqrt(2.0) sqrt6 = np.sqrt(6.0) sqrt10 = np.sqrt(10.0) sqrt12 = np.sqrt(12.0) if case.strip() == 'p': hsoc = np.zeros((6, 6), dtype=np.complex128) hsoc[0,0] = -1.0 hsoc[3,0] = sqrt2 hsoc[1,1] = 1.0 hsoc[5,2] = sqrt2 hsoc[0,3] = sqrt2 hsoc[4,4] = 1.0 hsoc[2,5] = sqrt2 hsoc[5,5] = -1.0 return 0.5 * soc * hsoc elif case.strip() == 't2g': hsoc = np.zeros((6, 6), dtype=np.complex128) hsoc[0,0] = -1.0 hsoc[3,0] = sqrt2 hsoc[1,1] = 1.0 hsoc[5,2] = sqrt2 hsoc[0,3] = sqrt2 hsoc[4,4] = 1.0 hsoc[2,5] = sqrt2 hsoc[5,5] = -1.0 return 0.5 * -soc * hsoc elif case.strip() == 'd': hsoc = np.zeros((10, 10), dtype=np.complex128) hsoc[0,0] = -2.0 hsoc[3,0] = 2.0 hsoc[1,1] = 2.0 hsoc[2,2] = -1.0 hsoc[5,2] = sqrt6 hsoc[0,3] = 2.0 hsoc[3,3] = 1.0 hsoc[7,4] = sqrt6 hsoc[2,5] = sqrt6 hsoc[6,6] = 1.0 hsoc[9,6] = 2.0 hsoc[4,7] = sqrt6 hsoc[7,7] = -1.0 hsoc[8,8] = 2.0 hsoc[6,9] = 2.0 hsoc[9,9] = -2.0 return 0.5 * soc * hsoc elif case.strip() == 'f': hsoc = np.zeros((14, 14), dtype=np.complex128) hsoc[0,0 ] = -3.0 hsoc[3,0 ] = sqrt6 hsoc[1,1 ] = 3.0 hsoc[2,2 ] = -2.0 hsoc[5,2 ] = sqrt10 hsoc[0,3 ] = sqrt6 hsoc[3,3 ] = 2.0 hsoc[4,4 ] = -1.0 hsoc[7,4 ] = sqrt12 hsoc[2,5 ] = sqrt10 hsoc[5,5 ] = 1.0 hsoc[9,6 ] = sqrt12 hsoc[4,7 ] = sqrt12 hsoc[8,8 ] = 1.0 hsoc[11,8 ] = sqrt10 hsoc[6,9 ] = sqrt12 hsoc[9,9 ] = -1.0 hsoc[10,10] = 2.0 hsoc[13,10] = sqrt6 hsoc[8,11 ] = sqrt10 hsoc[11,11] = -2.0 hsoc[12,12] = 3.0 hsoc[10,13] = sqrt6 hsoc[13,13] = -3.0 return 0.5 * soc * hsoc else: print("don't support SOC for this case: ", case) return
5,351,566
def wcs_to_celestial_frame(wcs): """ For a given WCS, return the coordinate frame that matches the celestial component of the WCS. Parameters ---------- wcs : :class:`~astropy.wcs.WCS` instance The WCS to find the frame for Returns ------- frame : :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance An instance of a :class:`~astropy.coordinates.baseframe.BaseCoordinateFrame` subclass instance that best matches the specified WCS. Notes ----- To extend this function to frames not defined in astropy.coordinates, you can write your own function which should take a :class:`~astropy.wcs.WCS` instance and should return either an instance of a frame, or `None` if no matching frame was found. You can register this function temporarily with:: >>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_frame_mappings >>> with custom_frame_mappings(my_function): ... wcs_to_celestial_frame(...) """ for mapping_set in WCS_FRAME_MAPPINGS: for func in mapping_set: frame = func(wcs) if frame is not None: return frame raise ValueError("Could not determine celestial frame corresponding to " "the specified WCS object")
5,351,567
def conv2d(x, f=64, k=3, d=1, act=None, pad='SAME', name='conv2d'):
    """
    :param x: input
    :param f: filters, default 64
    :param k: kernel size, default 3
    :param d: strides, default 1
    :param act: activation function, default None
    :param pad: padding (valid or same), default same
    :param name: scope name, default conv2d
    :return: conv2d net
    """
    return tf.layers.conv2d(x,
                            filters=f, kernel_size=k, strides=d,
                            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(5e-4),
                            bias_initializer=tf.zeros_initializer(),
                            activation=act,
                            padding=pad,
                            name=name)
5,351,568
def combine_subdomain(filenames, outfilename): """Recombine per-processor subdomain files into one single file. Note: filenames must be an array of files organized to reflect the subdomain decomposition. filenames[0,0] is bottom left, filenames[0,-1] is bottom right, filenames[-1,0] is top left, filenames[-1,-1] is top right. For example, filenames = [[sub_00.nc, sub_01.nc], [sub_02.nc, sub_03.nc]]. This can be improved. At some point it would be worthwhile to automatically build the filenames array. Also, we might want to add netcdf attributes like history, units, etc. :arg filenames: An array containing the names of the files to be recombined. :type filenames: numpy array (2D) :arg outfilename: The name of the file for saving output :type outfilename: string """ # Determine shape of each subdomain shapes = _define_shapes(filenames) # Initialize new = nc.Dataset(outfilename, 'w') _initialize_dimensions(new, nc.Dataset(filenames[0, 0])) newvars = _initialize_variables(new, nc.Dataset(filenames[0, 0])) # Build full array _concatentate_variables(filenames, shapes, newvars) new.close()
5,351,569
def _is_uniform_distributed_cf(cf):
    """
    Check if the provided center frequencies are uniformly distributed.
    """
    # uniform spacing means every second difference of the center frequencies is zero
    return not np.any(np.diff(np.diff(cf)) != 0)
5,351,570
def do_execfile(user_input):
    """only exists in PY2"""
    try:
        execfile(user_input)  # noqa: F821
    except Exception:
        pass
5,351,571
def description_serializer(description, object_, path, linker):
    """
    Serializes the given description.

    This function is a generator.

    Parameters
    ----------
    description : ``GravedDescription``
        The description to serialize
    object_ : ``UnitBase``
        The respective unit.
    path : ``QualPath``
        Path of the respective object to avoid incorrect link generation in subclasses.
    linker : `func`
        Function which creates a relative link between two units.

    Yields
    ------
    html_part : `str`
    """
    yield '<p>'

    content = graved_to_escaped(description.content, object_, path, linker)
    yield content

    yield '</p>'
5,351,572
def build_person(first_name, last_name): """Return a dictionary of information about a person.""" person = {'first': first_name, 'last': last_name} return person
5,351,573
def test_temp_bad_units(): """Simple check of bad units in temperature""" with pytest.raises(datatypes.UnitsError): datatypes.temperature(-99, "Q")
5,351,574
def radon(image, theta=None): """ Calculates the radon transform of an image given specified projection angles. Parameters ---------- image : array_like, dtype=float Input image. theta : array_like, dtype=float, optional (default np.arange(180)) Projection angles (in degrees). Returns ------- output : ndarray Radon transform (sinogram). """ if image.ndim != 2: raise ValueError('The input image must be 2-D') if theta is None: theta = np.arange(180) height, width = image.shape diagonal = np.sqrt(height**2 + width**2) heightpad = np.ceil(diagonal - height) widthpad = np.ceil(diagonal - width) padded_image = np.zeros((int(height + heightpad), int(width + widthpad))) y0, y1 = int(np.ceil(heightpad / 2)), \ int((np.ceil(heightpad / 2) + height)) x0, x1 = int((np.ceil(widthpad / 2))), \ int((np.ceil(widthpad / 2) + width)) padded_image[y0:y1, x0:x1] = image out = np.zeros((max(padded_image.shape), len(theta))) h, w = padded_image.shape dh, dw = h // 2, w // 2 shift0 = np.array([[1, 0, -dw], [0, 1, -dh], [0, 0, 1]]) shift1 = np.array([[1, 0, dw], [0, 1, dh], [0, 0, 1]]) def build_rotation(theta): T = -np.deg2rad(theta) R = np.array([[np.cos(T), -np.sin(T), 0], [np.sin(T), np.cos(T), 0], [0, 0, 1]]) return shift1.dot(R).dot(shift0) for i in range(len(theta)): rotated = homography(padded_image, build_rotation(-theta[i])) out[:, i] = rotated.sum(0)[::-1] return out
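# Usage sketch (assumes numpy as np and the module-level `homography` helper
# used by the function above):
import numpy as np

phantom = np.zeros((64, 64))
phantom[24:40, 24:40] = 1.0                    # a small square object
sinogram = radon(phantom, theta=np.arange(0, 180, 10))
print(sinogram.shape)                          # (padded image size, number of angles)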
5,351,575
def build_idrac_table_schemas(metric_definitions: list): """build_table_schemas Build iDRAC Table Schemas Build table schemas based on the idrac telemetry metric definitions Args: metric_definitions (list): idrac telemetry metric definitions Returns: dict: iDRAC table schemas """ table_schemas = {} try: for metric in metric_definitions: table_name = metric['Id'] metric_type = metric['MetricDataType'] metric_unit = metric.get('Units', None) # For network metrics, use BIG INT for storing the metric readings if metric_unit == 'By' or metric_unit == 'Pkt': value_type = 'BIGINT' else: value_type = utils.data_type_mapping.get(metric_type, 'TEXT') column_names = ['Timestamp', 'NodeID', 'Source', 'FQDD', 'Value'] column_types = ['TIMESTAMPTZ NOT NULL', 'INT NOT NULL', 'TEXT', \ 'TEXT', value_type] table_schemas.update({ table_name: { 'column_names': column_names, 'column_types': column_types, } }) except Exception as err: log.error(f"Cannot build idrac table schemas: {err}") return table_schemas
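# Illustrative input (a sketch; the field names mirror the keys the function
# reads, and utils.data_type_mapping is assumed to map 'Decimal' to e.g. 'REAL'):
metric_definitions = [
    {'Id': 'CPUUsage', 'MetricDataType': 'Decimal', 'Units': 'Percent'},
    {'Id': 'RXBytes', 'MetricDataType': 'Integer', 'Units': 'By'},
]
schemas = build_idrac_table_schemas(metric_definitions)
print(schemas['RXBytes']['column_types'][-1])   # 'BIGINT' because the unit is bytes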
5,351,576
def _increasing_randomly_negate_to_arg(
    level: int, params: Tuple[float, float]
) -> Tuple[float]:
    """
    Convert level to transform magnitude. This assumes transform magnitude increases
    (or decreases with 50% chance) linearly with level.

    Args:
        level (int): Level value.
        params (Tuple[float, float]): Params contains two values: 1) Base transform
            magnitude when level is 0; 2) Maximum increase in transform magnitude
            when level is at maximum.
    """
    magnitude = (level / _AUGMENTATION_MAX_LEVEL) * params[1]
    return (params[0] + _randomly_negate(magnitude),)
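# Worked example (assumes _AUGMENTATION_MAX_LEVEL == 10 and that
# _randomly_negate flips the sign of its argument with 50% probability):
# level=5, params=(0.1, 0.9)  ->  magnitude = (5 / 10) * 0.9 = 0.45,
# so the call returns (0.55,) or (-0.35,) with equal probability.
print(_increasing_randomly_negate_to_arg(5, (0.1, 0.9)))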
5,351,577
def test_column_duplicated(df1):
    """Raise ValueError if column is duplicated in `columns`"""
    with pytest.raises(ValueError):
        df1.complete(
            columns=[
                "Year",
                "Taxon",
                {"Year": lambda x: range(x.min(), x.max() + 1)},
            ]
        )
5,351,578
def hashify(params, max_length=8): """ Create a short hashed string of the given parameters. :param params: A dictionary of key, value pairs for parameters. :param max_length: [optional] The maximum length of the hashed string. """ param_str = json.dumps(params, separators=(',', ':'), sort_keys=True) param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest() return param_hash[:max_length]
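# Usage sketch: the hash is deterministic for a given params dict because the
# keys are sorted before serialization.
params = {'n_estimators': 100, 'max_depth': 3}
tag = hashify(params)
print(len(tag))                                               # 8 by default
print(tag == hashify({'max_depth': 3, 'n_estimators': 100}))  # True: key order is irrelevant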
5,351,579
def main(): """Complete the crudedata collection with missing dates.""" for cow in mongo.cows(): print("Completing cow {}...".format(cow)) dates = mongo.dates(cow) missing = get_missing_dates(dates) if missing: for lact in reversed(mongo.lacts(cow)): # The last date and day in database for this lactation last_date, last_day = get_last(cow, lact) # We start at the last day of the lactation then add the # missing lines till we reach the first day of the lactation while last_day > 1: last_day -= 1 last_date -= dt.timedelta(1) if last_date in missing: # If day = 1, we'll have a problem of unique key in # the second loop missing.remove(last_date) insert(cow, last_date, lact, last_day) last_date, last_day = get_last(cow, lact) # We start at the last day of the lactation then add the # missing lines till we reach the next lactation or the end of # the data if this lactation is the last one while last_date + dt.timedelta(1) in missing: last_day += 1 last_date += dt.timedelta(1) insert(cow, last_date, lact, last_day)
5,351,580
def version(package, encoding='utf-8'):
    """Obtain the package version from a python file e.g. pkg/__init__.py

    See <https://packaging.python.org/en/latest/single_source_version.html>.
    """
    path = os.path.join(os.path.dirname(__file__), package, '__init__.py')
    with io.open(path, encoding=encoding) as fp:
        version_info = fp.read()
    version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
                              version_info, re.M)
    if not version_match:
        raise RuntimeError("Unable to find version string.")
    return version_match.group(1)
5,351,581
def unique_entries(results):
    """Prune non-unique search results."""
    seen = set()
    clean_results = []
    for i in results:
        if i['code'] not in seen:
            clean_results.append(i)
            seen.add(i['code'])
    return clean_results
5,351,582
def get_q_confidence() -> int:
    """Gets the user's confidence for the card"""
    response = input("How confident do you feel about being able to answer this question (from 1-10)? ")
    # isnumeric() guards the int() conversion; use `and` (not the bitwise `&`)
    # so the range check only runs for numeric input.
    if response.isnumeric() and 0 < int(response) <= 10:
        return int(response)
    print("Incorrect score value, please enter a number from 1 to 10.")
    # we call the function until it returns the appropriate value
    return get_q_confidence()
5,351,583
def generate_offices_table(offices, by_office, by_polling_center, election_day, day_after_election_day): """ Pre-compute key data needed for generating election day office reports. """ offices_by_key = {str(office['code']): office for office in offices} rows = [] for key in sorted([key for key in by_office.keys()]): row = by_office[key] key = str(key) # copy name from the offices hash array row['english_name'] = offices_by_key[key]['english_name'] row['arabic_name'] = offices_by_key[key]['arabic_name'] on_election_day = row.get(election_day, {}) # get election day numbers row['opened'] = on_election_day.get('opened', 0) row['votes_reported_1'] = on_election_day.get('1', 0) row['votes_reported_2'] = on_election_day.get('2', 0) row['votes_reported_3'] = on_election_day.get('3', 0) # and aggregate counts row['reported_1'] = on_election_day.get('1_count', 0) row['reported_2'] = on_election_day.get('2_count', 0) row['reported_3'] = on_election_day.get('3_count', 0) # check for late results # We only want late reports for period 4. The JSON data has aggregate # numbers for office by day, but you can't tell which of those values are new reports on # EDAY+1 and which ones are replacements for values given on EDAY, so we have to iterate # through each center to get that info row['votes_reported_4'] = 0 reported_4 = 0 # Which polling centers are in this office? centers = {k: v for k, v in by_polling_center.items() if str(v['office_id']) == key} for center_id, center in centers.items(): if day_after_election_day in center and '4' in center[day_after_election_day]: # found a period 4 report on EDAY+1. Sum the votes and increment the report count row['votes_reported_4'] += center[day_after_election_day]['4'] reported_4 += 1 elif election_day in center and '4' in center[election_day]: # didn't find an EDAY+1 report, so use EDAY, if present row['votes_reported_4'] += center[election_day]['4'] reported_4 += 1 row['reported_4'] = reported_4 # save derived values row['not_opened'] = row['polling_center_count'] - row['opened'] row['not_reported_1'] = row['polling_center_count'] - row['reported_1'] row['not_reported_2'] = row['polling_center_count'] - row['reported_2'] row['not_reported_3'] = row['polling_center_count'] - row['reported_3'] row['not_reported_4'] = row['polling_center_count'] - reported_4 row['closed'] = reported_4 # reporting final tally means center closed rows.append(row) return rows
5,351,584
def bang(nick, chan, message, db, conn, notice): """when there is a duck on the loose use this command to shoot it.""" global game_status, scripters if chan in opt_out: return network = conn.name score = "" out = "" miss = ["You just shot yourself in the foot, the duck laughed at you as it flew off.", "WHOOSH! You missed the duck completely!", "Your gun jammed!", "Better luck next time.", "Your barrel must be bent lol, maybe next time!", "Clearly you're using a BB gun, get a real gun and try again!", "Did you just throw a firecracker? Go buy a shotgun and come back!","Wow, Could you be a worse shot?" ] if not game_status[network][chan]['game_on']: return "There is no activehunt right now. Use @starthunt to start a game." elif game_status[network][chan]['duck_status'] != 1: if game_status[network][chan]['no_duck_kick'] == 1: out = "KICK {} {} The last duck was already nabbed, try again with the next duck.".format(chan, nick) conn.send(out) return return "The last duck was already nabbed, try again with the next duck." else: game_status[network][chan]['shoot_time'] = time() deploy = game_status[network][chan]['duck_time'] shoot = game_status[network][chan]['shoot_time'] if nick.lower() in scripters: if scripters[nick.lower()] > shoot: notice("You are in a cool down period, you can try again in {} seconds.".format(str(scripters[nick.lower()] - shoot))) return chance = hit_or_miss(deploy, shoot) if not random.random() <= chance and chance > .05: out = random.choice(miss) + " You can try again in 3 seconds." scripters[nick.lower()] = shoot + 3 return out if chance == .05: out += "You pulled the trigger in {} seconds, that's mighty fast. Are you running a script for this game? Take a 2 hour cool down.".format(str(shoot - deploy)) scripters[nick.lower()] = shoot + 7200 if not random.random() <= chance: return random.choice(miss) + " " + out else: message(out) game_status[network][chan]['duck_status'] = 2 score = db.execute(select([table.c.shot]) \ .where(table.c.network == conn.name) \ .where(table.c.chan == chan.lower()) \ .where(table.c.name == nick.lower())).fetchone() if score: score = score[0] score += 1 dbupdate(nick, chan, db, conn, score, 0) else: score = 1 dbadd_entry(nick, chan, db, conn, score, 0) timer = "{:.3f}".format(shoot - deploy) duck = "duck" if score == 1 else "ducks" message("{} Perfect aim, you shot the duck in {} seconds! You have killed {} {} in {}.".format(nick, timer, score, duck, chan)) set_ducktime(chan, conn)
5,351,585
def is_nan(param: float, param_name: str = None, message=None):
    """
    Guards the specified :param param by requiring it to be NaN (not a number),
    throwing an exception of type ArgumentException with a specific :param message
    when the precondition has not been met
    :param param: The param to be checked
    :param param_name: The name of the param to be checked, that will be included in the exception
    :param message: The message that will be included in the exception
    """
    if not param_name:
        param_name = GenericParameterName

    if not message:
        message = Template(template=Templates.NaNMessage).substitute(var=param_name)

    if not math.isnan(param):
        raise ArgumentException(message=message)
5,351,586
def print_summary(model, line_length=None, positions=None, print_fn=None, expand_depth=0): """Prints a summary of a model. Args: model: Keras model instance. line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`. print_fn: Print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout). """ if print_fn is None: print_fn = print if model.__class__.__name__ == 'Sequential': sequential_like = True elif not model._is_graph_network: # We treat subclassed models as a simple sequence of layers, for logging # purposes. sequential_like = True else: sequential_like = True nodes_by_depth = model._nodes_by_depth.values() nodes = [] for v in nodes_by_depth: if (len(v) > 1) or (len(v) == 1 and len(nest.flatten(v[0].keras_inputs)) > 1): # if the model has multiple nodes # or if the nodes have multiple inbound_layers # the model is no longer sequential sequential_like = False break nodes += v if sequential_like: # search for shared layers for layer in model.layers: flag = False for node in layer._inbound_nodes: if node in nodes: if flag: sequential_like = False break else: flag = True if not sequential_like: break if sequential_like: line_length = line_length or 65 positions = positions or [.45, .85, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #'] else: line_length = line_length or 98 positions = positions or [.33, .55, .67, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to'] relevant_nodes = [] for v in model._nodes_by_depth.values(): relevant_nodes += v def print_row(fields, positions): line = '' for i in range(len(fields)): if i > 0: line = line[:-1] + ' ' line += str(fields[i]) line = line[:positions[i]] line += ' ' * (positions[i] - len(line)) print_fn(line) print_fn('Model: "{}"'.format(model.name)) print_fn('_' * line_length) print_row(to_display, positions) print_fn('=' * line_length) def print_layer_summary(layer): """Prints a summary for a single layer. Args: layer: target layer. """ try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' except RuntimeError: # output_shape unknown in Eager mode. output_shape = '?' name = layer.name cls_name = layer.__class__.__name__ if not layer.built and not getattr(layer, '_is_graph_network', False): # If a subclassed model has a layer that is not called in Model.call, the # layer will not be built and we cannot call layer.count_params(). print(f"Subclassed layer: {layer.name}") params = '0 (unused)' else: params = layer.count_params() fields = [name + ' (' + cls_name + ')', output_shape, params] print_row(fields, positions) def print_layer_summary_with_connections(layer): """Prints a summary for a single layer (including topological connections). Args: layer: target layer. 
""" try: output_shape = layer.output_shape except AttributeError: output_shape = 'multiple' connections = [] for node in layer._inbound_nodes: if relevant_nodes and node not in relevant_nodes: # node is not part of the current network continue for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound(): connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index)) name = layer.name cls_name = layer.__class__.__name__ if not connections: first_connection = '' else: first_connection = connections[0] fields = [ name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection ] print_row(fields, positions) if len(connections) > 1: for i in range(1, len(connections)): fields = ['', '', '', connections[i]] print_row(fields, positions) def print_expanded_summary(layer, expand, is_last): if expand > 0 and hasattr(layer, "layers"): for i in range(len(layer.layers)): print_expanded_summary( layer.layers[i], expand-1, is_last and i == len(layers)-1) else: if sequential_like: print_layer_summary(layer) else: print_layer_summary_with_connections(layer) if is_last: print_fn('=' * line_length) else: print_fn('_' * line_length) layers = model.layers for i in range(len(layers)): print_expanded_summary(layers[i], expand_depth, i == len(layers)-1) if hasattr(model, '_collected_trainable_weights'): trainable_count = count_params(model._collected_trainable_weights) else: trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print_fn('Total params: {:,}'.format( trainable_count + non_trainable_count)) print_fn('Trainable params: {:,}'.format(trainable_count)) print_fn('Non-trainable params: {:,}'.format(non_trainable_count)) print_fn('_' * line_length)
5,351,587
async def test_avg_processing_time(aresponses):
    """Test requesting AdGuard Home DNS average processing time stats."""
    aresponses.add(
        "example.com:3000",
        "/control/stats",
        "GET",
        aresponses.Response(
            status=200,
            headers={"Content-Type": "application/json"},
            text='{"avg_processing_time": 0.03141}',
        ),
    )
    async with aiohttp.ClientSession() as session:
        adguard = AdGuardHome("example.com", session=session)
        result = await adguard.stats.avg_processing_time()
        assert result == 31.41
5,351,588
def pfunc_role_coverage(args):
    """Another intermediate function for parallelization; works the same way as
    pfunc_doctor_banding."""
    rota = args[0]
    role = args[1]
    return rota.get_role_coverage(role)
5,351,589
def parse(s: str) -> Tree: """ Parse PENMAN-notation string *s* into its tree structure. Args: s: a string containing a single PENMAN-serialized graph Returns: The tree structure described by *s*. Example: >>> import penman >>> penman.parse('(b / bark-01 :ARG0 (d / dog))') # noqa Tree(('b', [('/', 'bark-01'), (':ARG0', ('d', [('/', 'dog')]))])) """ tokens = lex(s, pattern=PENMAN_RE) return _parse(tokens)
5,351,590
def subtract(v: Vector, w: Vector) -> Vector: """Subtracts corresponding elements""" assert len(v) == len(w), "vectors must be the same length" return [v_i - w_i for v_i, w_i in zip(v, w)]
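# Usage sketch (Vector is assumed to be the module's alias for List[float]):
print(subtract([5.0, 7.0, 9.0], [4.0, 5.0, 6.0]))   # [1.0, 2.0, 3.0]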
5,351,591
def get_QUTFish(image_path, train_ratio=0.8): """ get train and test dataset of QUTFish: https://wiki.qut.edu.au/display/cyphy/Fish+Dataset step1: download the dataset step2: set the root to QUT_fish_data/ :param image_path: the QUT_fish_data/ :param the percentage used for training :return: """ # if the images has been scanned before then just load train_images_file = 'data/QUTFish_train_images.npy' train_labels_file = 'data/QUTFish_train_labels.npy' test_images_file = 'data/QUTFish_test_images.npy' test_labels_file = 'data/QUTFish_test_labels.npy' if os.path.exists(train_images_file): print('Found pre-generated train/test lists!') images_train = np.load(train_images_file) labels_train = np.load(train_labels_file) images_val = np.load(test_images_file) labels_val = np.load(test_labels_file) images_train, labels_train = shuffle(images_train, labels_train) return images_train, labels_train, images_val, labels_val # scan the image folder to get the train and test image/label list images = [] labels = [] label_id = 0 # read label and image file list from final_all_index.txt # line format: 1=A73EGS~P=controlled=A73EGS~P_7=7s images_tmp = [] current_class = None with open(os.path.join(image_path, "final_all_index.txt")) as f: for line in f: names = line.split('=') if names[2] != 'insitu': continue if not os.path.exists(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')): continue # print(names) if current_class is None: current_class = int(names[0]) images_tmp.append(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')) else: if current_class == int(names[0]): images_tmp.append(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')) else: if len(images_tmp) > 10: # only save class has >10 images # append this class to dataset labels_tmp = np.ones(len(images_tmp))*label_id images.extend(images_tmp) labels.extend(labels_tmp.astype(np.int8).tolist()) label_id += 1 print('Dataset [QUTFish]: #class=%s, #sample=%s' % (label_id, len(images_tmp))) # move on to next class current_class = int(names[0]) images_tmp = [] images_tmp.append(os.path.join(image_path, 'images/raw_images/' + names[3] + '.jpg')) print('QUT: #classes: ', label_id, ', #images: ', len(images)) images_train, labels_train, images_val, labels_val = train_val_split(images, labels, train_ratio) # save the indexes to files np.save(train_images_file, np.asarray(images_train)) np.save(train_labels_file, np.asarray(labels_train)) np.save(test_images_file, np.asarray(images_val)) np.save(test_labels_file, np.asarray(labels_val)) # random shuffle images_train, labels_train = shuffle(images_train, labels_train) return images_train, labels_train, images_val, labels_val
5,351,592
def test_unknown_tag_code(event, glsc): """if unknown tag code is somehow selected, a meaningful error message should be returned. """ choices = get_event_model_form_choices(event) # make sure cwt is a valid fin clip choice: event_dict = create_event_dict(event) event_dict["fish_tags"] = ["XX"] form = StockingEventForm(event_dict, choices=choices, user=glsc) status = form.is_valid() assert status is False error_message = form.errors["fish_tags"] expected = "Select a valid choice. XX is not one of the available choices." assert expected in error_message
5,351,593
def getWordScore(word, n): """ Returns the score for a word. Assumes the word is a valid word. The score for a word is the sum of the points for letters in the word, multiplied by the length of the word, PLUS 50 points if all n letters are used on the first turn. Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES) word: string (lowercase letters) n: integer (HAND_SIZE; i.e., hand size required for additional points) returns: int >= 0 """ result = 0 bonus = 0 if len(word) == n: bonus = 50 for letter in word: result += SCRABBLE_LETTER_VALUES[letter] result *= len(word) result += bonus return result
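# Worked examples (assume standard Scrabble letter values in SCRABBLE_LETTER_VALUES):
# 'was' with n=7:      (4 + 1 + 1) * 3 = 18, no bonus since only 3 of 7 letters are used
# 'waybill' with n=7:  (4 + 1 + 4 + 3 + 1 + 1 + 1) * 7 = 105, plus the 50-point bonus = 155
print(getWordScore('was', 7))        # 18
print(getWordScore('waybill', 7))    # 155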
5,351,594
def test_02(): """Test""" args = '--seed 1 --kmer_size 4 --num_words 5 --max_word 8' out = getoutput('{} {} {}'.format(prg, args, dickinson)) expected = """ 1: miled 2: iliar 3: noticin 4: venture 5: nelled """.strip() assert out.strip() == expected
5,351,595
def resolve_label(token, symbols, memory): """Given a label, resolve it in to an address and save to symbol table.""" symbols.add(token, memory.get_address())
5,351,596
def AutomateMe(data, sslSocket):
    """
    This is where you automate me.

    :param data: data that came in [str]
    :param sslSocket: Client SSL Socket Object [ssl wrapper socket object]
    :return: None
    """
    # Caught it!
    # Put your automation here. Note that if you have a lot of computation you might
    # want to go back to line 48 and run this as a thread so as not to get a
    # timeout on the client side or backlog other clients here.

    return
5,351,597
def get_commit():
    """
    Try to return the intended commit / release to deal with.
    Otherwise raise a clear error.

    1) it was specified on the command line
    2) use the current branch in the target repo
    """
    commit = getattr(env, 'commit', None) or rev_parse('HEAD')
    if commit is None:
        raise RuntimeError(
            'Unable to ascertain target commit from command line or git repo')
    return commit
5,351,598
def main(): """ The main function of our asteroids game. """ # Optional: Ask for difficulty in terminal try: diff = int(input("Choose your difficulty! 1: easy, 2: medium, 3: hard. " )) if type(diff) != int: raise ValueError except ValueError: diff = 1 print("That was no integer. You now play on default: easy!") # Initialize pygame pygame.init() # Fix FPS clock = pygame.time.Clock() FPS = 60 # Gameloop variables shoot_count = 0 time_count = 0 lives = 3 score = 0 game_ended = False # Create game window screen screen = pygame.display.set_mode((WIDTH,HEIGHT)) # Game title pygame.display.set_caption("Asteroids") # Asteroid icon: https://www.flaticon.com/free-icon/meteorite_4260653?term=asteroids&related_id=4260653 img = pygame.image.load("images/meteorite.png") pygame.display.set_icon(img) # Background: https://wallpapertag.com/wallpaper/full/a/5/b/547899-large-star-sky-wallpaper-3100x1740-4k.jpg bg = pygame.image.load("images/star_sky.jpg") bg = pygame.transform.scale(bg, (WIDTH, HEIGHT)) # Score and lives display font_over = pygame.font.SysFont("Georgia", 60) font_score = pygame.font.SysFont("Georgia", 30) font_lives = pygame.font.SysFont("Georgia", 15) again_display = font_score.render("Press 'r' to restart.", True, (255,255,255)) # Setup sprite groups for use in collision detection objects = pygame.sprite.Group() bullets = pygame.sprite.Group() enemies = pygame.sprite.Group() # Instantiate player object player = P.Player() objects.add(player) # Main game loop running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False # Get keys for input checks keys = pygame.key.get_pressed() # Shooting only possible three times per second if shoot_count >= FPS//3: # Shoot on 'space' if keys[pygame.K_SPACE]: # Spawn bullet from player and add to sprite groups for collision bullet = B.Bullet(player) objects.add(bullet) bullets.add(bullet) shoot_count = 0 # Spawn enemies with random size every 3 seconds if time_count >= FPS*3: scale = sample([50, 100, 150], k=1)[0] enemy = E.Enemy(diff, scale) objects.add(enemy) enemies.add(enemy) time_count = 0 # Collision detection for death (player,enemy) and score (bullet,enemy) for enemy in enemies: if pygame.sprite.collide_rect(player, enemy): enemy.kill() lives -= 1 if lives == 0: # destroy every object for obj in enemies: obj.kill() game_ended = True for bullet in bullets: if pygame.sprite.collide_rect(bullet, enemy): score += 10 bullet.kill() # If an enemy is shot, spawn two new, smaller enemies if enemy.scale == 150: e1 = E.Enemy(diff, 100) e2 = E.Enemy(diff, 100) e1.pos_x, e1.pos_y = enemy.pos_x, enemy.pos_y e2.pos_x, e2.pos_y = enemy.pos_x+50, enemy.pos_y+50 enemies.add(e1) enemies.add(e2) objects.add(e1) objects.add(e2) enemy.kill() elif enemy.scale == 100: e1 = E.Enemy(diff, 50) e2 = E.Enemy(diff, 50) e1.pos_x, e1.pos_y = enemy.pos_x, enemy.pos_y e2.pos_x, e2.pos_y = enemy.pos_x+25, enemy.pos_y+25 enemies.add(e1) enemies.add(e2) objects.add(e1) objects.add(e2) enemy.kill() else: enemy.kill() score += 40 # If game is over, wait for player to restart or quit the game if game_ended: # Draw background and gameover display screen.blit(bg, (0, 0)) display = "GAME OVER! Your score is " + str(score) + "." 
gameover_display = font_over.render(display, True, (255,255,255)) screen.blit(gameover_display, (WIDTH/2 - gameover_display.get_width()/2, HEIGHT/2 - gameover_display.get_height()/2)) screen.blit(again_display, (WIDTH/2 - again_display.get_width()/2, HEIGHT/2 - again_display.get_height()/2 + gameover_display.get_height())) # Restart on 'r' if keys[pygame.K_r]: score = 0 lives = 3 player.pos_x, player.pos_y = WIDTH//2, HEIGHT//2 player.rect = player.img.get_rect(center=(player.pos_x, player.pos_y)) screen.blit(player.img, player.rect) game_ended = False else: # Draw background image onto screen at top left corner screen.blit(bg, (0, 0)) score_display = font_score.render(str(score), True, (255,255,255)) display = "Lives: " + str(lives) lives_display = font_lives.render(display, True, (255,255,255)) # Update objects and draw on screen for obj in objects: obj.move() screen.blit(obj.img, obj.rect) # Draw score and lives onto screen screen.blit(score_display, (10, 10)) screen.blit(lives_display, (10, 40)) # Increase time vars every update clock.tick(FPS) time_count += 1 shoot_count += 1 # Update display every frame pygame.display.update() # Quit the game after application is not 'running' anymore pygame.quit()
5,351,599