content (stringlengths 22-815k) | id (int64 0-4.91M) |
---|---|
def bittensor_dtype_to_torch_dtype(bdtype):
""" Translates between bittensor.dtype and torch.dtypes.
Args:
bdtype (bittensor.dtype): bittensor.dtype to translate.
Returns:
dtype: (torch.dtype): translated torch.dtype.
"""
if bdtype == bittensor.proto.DataType.FLOAT32:
dtype = torch.float32
elif bdtype == bittensor.proto.DataType.FLOAT64:
dtype = torch.float64
elif bdtype == bittensor.proto.DataType.INT32:
dtype = torch.int32
elif bdtype == bittensor.proto.DataType.INT64:
dtype = torch.int64
else:
raise DeserializationException(
'Unknown bittensor.Dtype or no equivalent torch.dtype for bittensor.dtype = {}'
.format(bdtype))
return dtype
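# Minimal usage sketch for the helper above (hedged: assumes the bittensor and torch
# packages are installed; the DataType names are taken from the branches above and not
# verified against any particular bittensor release):
#
#   import bittensor, torch
#   assert bittensor_dtype_to_torch_dtype(bittensor.proto.DataType.FLOAT32) is torch.float32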
| 5,350,400 |
def read_all(dataset, table):
"""Read all data from the API, convert to pandas dataframe"""
return _read_from_json(
CFG.path.replace("data", dataset=dataset, table=table, converter="path")
)
| 5,350,401 |
def compute_spectrum_welch(sig, fs, avg_type='mean', window='hann',
nperseg=None, noverlap=None,
f_range=None, outlier_percent=None):
"""Compute the power spectral density using Welch's method.
Parameters
-----------
sig : 1d or 2d array
Time series.
fs : float
Sampling rate, in Hz.
avg_type : {'mean', 'median'}, optional
Method to average across the windows:
* 'mean' is the same as Welch's method, taking the mean across FFT windows.
* 'median' uses median across FFT windows instead of the mean, to minimize outlier effects.
window : str or tuple or array_like, optional, default: 'hann'
Desired window to use. See scipy.signal.get_window for a list of available windows.
If array_like, the array will be used as the window and its length must be nperseg.
nperseg : int, optional
Length of each segment, in number of samples.
If None, and window is str or tuple, is set to 1 second of data.
If None, and window is array_like, is set to the length of the window.
noverlap : int, optional
Number of points to overlap between segments.
If None, noverlap = nperseg // 8.
f_range : list of [float, float], optional
Frequency range to sub-select from the power spectrum.
outlier_percent : float, optional
The percentage of outlier values to be removed. Must be between 0 and 100.
Returns
-------
freqs : 1d array
Frequencies at which the measure was calculated.
spectrum : 1d or 2d array
Power spectral density.
Examples
--------
Compute the power spectrum of a simulated time series using Welch's method:
>>> from neurodsp.sim import sim_combined
>>> sig = sim_combined(n_seconds=10, fs=500,
... components={'sim_powerlaw': {}, 'sim_oscillation': {'freq': 10}})
>>> freqs, spec = compute_spectrum_welch(sig, fs=500)
"""
# Calculate the short time Fourier transform with signal.spectrogram
nperseg, noverlap = check_spg_settings(fs, window, nperseg, noverlap)
freqs, _, spg = spectrogram(sig, fs, window, nperseg, noverlap)
# Throw out outliers if indicated
if outlier_percent is not None:
spg = discard_outliers(spg, outlier_percent)
# Average across windows
spectrum = get_avg_func(avg_type)(spg, axis=-1)
# Trim spectrum, if requested
if f_range:
freqs, spectrum = trim_spectrum(freqs, spectrum, f_range)
return freqs, spectrum
| 5,350,402 |
def _function_set_name(f):
"""
return the name of a function (not the module)
@param f function
@return name
.. versionadded:: 1.1
"""
name = f.__name__
return name.split(".")[-1]
| 5,350,403 |
def better_event_loop(max_fps=100):
"""A simple event loop that schedules draws."""
td = 1 / max_fps
while update_glfw_canvasses():
# Determine next time to draw
now = perf_counter()
tnext = math.ceil(now / td) * td
# Process events until it's time to draw
while now < tnext:
glfw.wait_events_timeout(tnext - now)
now = perf_counter()
| 5,350,404 |
def measure_step(max_steps, multiplier):
"""
Measure the performance of STEP
:param max_steps: maximum number of simulation steps to measure
:param multiplier: DTMULT (simulation speed) multiplier to set before stepping
:return:
"""
resp = requests.post(f"{API_URL_BASE}/ic", json={"filename": "TEST.scn"})
assert resp.status_code == 200, "Expected the scenario to be loaded"
resp = requests.post(f"{API_URL_BASE}/dtmult", json={"multiplier": multiplier})
assert resp.status_code == 200, "Expected DTMULT to be set"
times = []
n_steps = 0
try:
while True:
print(f"Step {n_steps + 1}")
start = time.time()
resp = requests.post(f"{API_URL_BASE}/step")
assert resp.status_code == 200, "Expected the simulation was stepped"
times.append(time.time() - start)
n_steps += 1
if n_steps >= max_steps:
break
except KeyboardInterrupt:
print("Cancelled")
if not times:
print("No data collected")
return
print(f"times: {times}")
print(f"Mean step time {sum(times)/float(len(times)):.2f}s (dtmult={multiplier})")
| 5,350,405 |
def _notes_from_paths(
paths: Sequence[Path],
wiki_name: str,
callback: Optional[Callable[[int, int], None]]) -> Set[TwNote]:
"""
Given an iterable of paths, compile the notes found in all those tiddlers.
:param paths: The paths of the tiddlers to generate notes for.
:param wiki_name: The name/id of the wiki these notes are from.
:param callback: Optional callable passing back progress. See :func:`find_notes`.
:return: A set of all the notes found in the tiddler files passed.
"""
notes = set()
for index, tiddler in enumerate(paths, 0):
with open(tiddler, 'rb') as f:
tid_text = f.read().decode()
tid_name = tiddler.name[:tiddler.name.find(f".{RENDERED_FILE_EXTENSION}")]
notes.update(_notes_from_tiddler(tid_text, wiki_name, tid_name))
if callback is not None and not index % 50:
callback(index+1, len(paths))
if callback is not None:
callback(len(paths), len(paths))
return notes
| 5,350,406 |
def load_matrix(file_matrix, V):
"""load matrix
:param file_matrix: path of pre-trained matrix (output file)
:param V: vocab size
:return: matrix(list)
"""
matrix = [[0 for _ in range(V)] for _ in range(V)]
with open(file_matrix) as fp:
for line in fp:
target_id, context_id_values = line.strip().split("\t")
context_id_values = context_id_values.split()
for context_id_value in context_id_values:
context_id, value = context_id_value.split(":")
matrix[int(target_id)][int(context_id)] += float(value)
return matrix
| 5,350,407 |
def add(*args):
"""Adding list of values"""
return sum(args)
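# Quick sanity check (pure built-ins, safe to run):
assert add(1, 2, 3) == 6
assert add() == 0  # sum() of no arguments is 0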
| 5,350,408 |
def test_ion_fraction_field_is_from_on_disk_fields():
"""
Test to add various ion fields to Enzo dataset and slice on them
"""
ds = load(ISO_GALAXY)
add_ion_fields(ds, ['H'], ftype='gas')
ad = ds.all_data()
# Assure that a sampling of fields are added and can be sliced
arr1 = ad['H_p0_ion_fraction']
arr2 = ad['H_p0_number_density'] / ad['H_nuclei_density']
assert_array_rel_equal(arr1, arr2, decimals=15)
| 5,350,409 |
def get_chart_dates(df, start_date=None, end_date=None, utc=True, auto_start=None, auto_end=None):
"""
Get dates for chart functions.
More info on date string formats at: https://strftime.org/
Parameters:
df : The dataframe for the chart, needed to ascertain start and end dates, if none are provided.
start_date : The start date for the entire series to be contained in the chart (start of max range).
end_date : The end date for the entire series to be contained in the chart (end of max range).
auto_start : The start of the default range to display on charts, until a user clicks a different range.
auto_end : The end of the default range to display on charts, until a user clicks a different range.
"""
if utc:
utc_now = pytz.utc.localize(datetime.utcnow())
utc_now.isoformat()
utc_td_dmy_str = utc_now.strftime("%d-%m-%Y")
utc_td_ymd_str = utc_now.strftime('%Y-%m-%d')
t = utc_now
t_dmy_str = utc_td_dmy_str
t_ymd_str = utc_td_ymd_str
elif not utc:
now = datetime.now()
td_dmy_str = now.strftime("%d-%m-%Y")
td_ymd_str = now.strftime('%Y-%m-%d')
t = now
t_dmy_str = td_dmy_str
t_ymd_str = td_ymd_str
# End date:
if end_date == None:
end = df.index.max()
chart_end = end.strftime("%d-%m-%Y")
elif (end_date != None) and (isinstance(end_date, str)):
end = datetime.strptime(end_date, '%Y-%m-%d')
chart_end = end.strftime("%d-%m-%Y")
elif (end_date != None) and (type(end_date) == datetime):
end = end_date
chart_end = end.strftime("%d-%m-%Y")
elif (end_date != None) and (type(end_date) == date):
end = end_date
chart_end = end.strftime("%d-%m-%Y")
elif isinstance(end_date, pd.Timestamp):
end = pd.to_datetime(end_date)
chart_end = end.strftime("%d-%m-%Y")
# Start date:
if start_date == None:
start = df.index.min()
chart_start = start.strftime("%d-%m-%Y")
elif (start_date != None) and (isinstance(start_date, str)):
start = datetime.strptime(start_date, '%Y-%m-%d')
chart_start = start.strftime("%d-%m-%Y")
elif (start_date != None) and (type(start_date) == datetime):
start = start_date
chart_start = start.strftime("%d-%m-%Y")
elif (start_date != None) and (type(start_date) == date):
start = start_date
chart_start = start.strftime("%d-%m-%Y")
elif isinstance(start_date, pd.Timestamp):
start = pd.to_datetime(start_date)
chart_start = start.strftime("%d-%m-%Y")
# Auto end
if auto_end == None:
auto_end = t_ymd_str
elif auto_end == 'yst':
at_end = t - timedelta(days=1)
auto_end = at_end.strftime('%Y-%m-%d')
elif (auto_end != None) and (isinstance(auto_end, str)):
at_end = datetime.strptime(auto_end, '%Y-%m-%d')
auto_end = at_end.strftime('%Y-%m-%d')
elif (auto_end != None) and (type(auto_end) == datetime):
at_end = auto_end
auto_end = at_end.strftime('%Y-%m-%d')
elif (auto_end != None) and (type(auto_end) == date):
at_end = auto_end
auto_end = at_end.strftime('%Y-%m-%d')
elif isinstance(auto_end, pd.Timestamp):
at_end = pd.to_datetime(auto_end)
auto_end = at_end.strftime('%Y-%m-%d')
# Auto start
if auto_start == None or auto_start == 'ytd':
at_st = first_day_of_current_year(time=False, utc=False)
auto_start = at_st.strftime('%Y-%m-%d')
elif auto_start == '1yr':
at_st = t - timedelta(days=365)
auto_start = at_st.strftime('%Y-%m-%d')
elif (auto_start != None) and (isinstance(auto_start, str)):
at_start = datetime.strptime(auto_start, '%Y-%m-%d')
auto_start = at_start.strftime('%Y-%m-%d')
elif (auto_start != None) and (type(auto_start) == datetime):
at_start = auto_start
auto_start = at_start.strftime('%Y-%m-%d')
elif (auto_start != None) and (type(auto_start) == date):
at_start = auto_start
auto_start = at_start.strftime('%Y-%m-%d')
elif isinstance(auto_start, pd.Timestamp):
at_start = pd.to_datetime(auto_start)
auto_start = at_start.strftime('%Y-%m-%d')
return chart_start, chart_end, auto_start, auto_end
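# Usage sketch (hedged: assumes pandas, pytz and the module-level helper
# first_day_of_current_year used above are available; the column name "close" is
# illustrative only):
#
#   import pandas as pd
#   df = pd.DataFrame({"close": [1.0, 2.0, 3.0]},
#                     index=pd.date_range("2021-01-01", periods=3, freq="D"))
#   chart_start, chart_end, auto_start, auto_end = get_chart_dates(df, auto_start="1yr")
#   # chart_start / chart_end are taken from the dataframe index, formatted as DD-MM-YYYY;
#   # auto_start / auto_end come back as YYYY-MM-DD strings.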
| 5,350,410 |
def run_benchmark_suite(analyser, suite, verbose, debug, timeout, files, bench):
""" Run an analyzer (like Mythril) on a benchmark suite.
:param analyser: BaseAnalyser child instance
:param suite: Name of test suite
:param verbose: Verbosity
:param debug: Whether debug is on
:param timeout: Test execution timeout
:param files: When True, prints list of solidity files and exits
:param bench: When not None, gives a list of solidity files to filter on
:return:
"""
print("Using {} {}".format(analyser.get_name(), analyser.version))
testsuite_conf = get_benchmark_yaml(project_root_dir, suite, analyser.get_name(), debug)
benchmark_files = gather_benchmark_files(code_root_dir, suite,
testsuite_conf['benchmark_subdir'])
if not benchmark_files:
print("No benchmark files found in suite {}".format(suite))
return 1
if bench:
benchmark_files = [path for path in benchmark_files
if basename(path) in bench]
if not benchmark_files:
print("No benchmark files found in suite {} after filtering {}"
.format(suite, bench))
return 1
out_data = {
'analyzer': analyser.get_name(),
'date': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
'version': analyser.version,
'suite': testsuite_conf['suite'],
}
for field in 'benchmark_subdir benchmark_link benchmark_url_dir'.split():
out_data[field] = testsuite_conf[field]
# Zero counters
unconfigured = invalid_execution = error_execution = 0
ignored_benchmarks = unfound_issues = benchmarks = 0
timed_out = expected = 0
total_time = 0.0
out_data['benchmark_files'] = benchmark_files
if files:
print("Benchmark suite {} contains {} files:".format(suite, len(benchmark_files)))
for bench_name in benchmark_files:
print("\t", bench_name)
sys.exit(0)
print("Running {} benchmark suite".format(suite))
out_data['benchmarks'] = {}
for sol_file in benchmark_files:
benchmarks += 1
print('-' * 40)
print("Checking {}".format(sol_file))
# Generate path to solidity file
sol_path = Path(sol_file)
test_name = str(sol_path.parent / sol_path.stem)
# Read expected data and initialize output variables
expected_data = testsuite_conf.get(test_name, None)
run_opts = expected_data.get('options', []) if expected_data else []
bench_data = out_data['benchmarks'][test_name] = {}
if expected_data:
bench_data['bug_type'] = expected_data.get('bug_type', 'Unknown')
bench_data['expected_data'] = expected_data
run_time = expected_data.get('run_time', timeout)
if expected_data.get('ignore', None):
# Test case ignored
print('Benchmark "{}" marked for ignoring; reason: {}'
.format(test_name, expected_data['reason']))
ignored_benchmarks += 1
bench_data['result'] = 'Ignored'
bench_data['elapsed_str'] = 'ignored'
continue
elif timeout < run_time:
# When the code is too long, we skip it in the YAML
print('Benchmark "{}" skipped because it is noted to take a long time; '
'{} seconds'
.format(test_name, run_time))
ignored_benchmarks += 1
bench_data['result'] = 'Too Long'
bench_data['elapsed_str'] = secs_to_human(run_time)
continue
try:
res = analyser.run_test(sol_file, run_opts)
except AnalyserError as e:
print("{} invocation:\n\t{}\n failed with return code {}.\n\tError: {}"
.format(analyser.get_name(), e.cmd, e.returncode, str(e)))
invalid_execution += 1
bench_data['elapsed_str'] = 'errored'
bench_data['result'] = 'Errored'
bench_data['execution_returncode'] = e.returncode
continue
except AnalyserTimeoutError as e:
elapsed_str = secs_to_human(e.elapsed)
print('Benchmark "{}" timed out after {}'.format(test_name, elapsed_str))
timed_out += 1
bench_data['elapsed'] = e.elapsed
bench_data['elapsed_str'] = elapsed_str
bench_data['execution_returncode'] = 0
bench_data['result'] = 'Timed Out'
continue
elapsed_str = secs_to_human(res.elapsed)
bench_data['elapsed'] = res.elapsed
bench_data['elapsed_str'] = elapsed_str
bench_data['execution_returncode'] = 0
total_time += res.elapsed
print(elapsed_str)
if not expected_data:
unconfigured += 1
bench_data['result'] = 'Unconfigured'
print('Benchmark "{}" results not configured, '
'so I cannot pass judgement on this'.format(test_name))
pp.pprint(res.issues)
print("=" * 30)
if unconfigured > 5:
break
continue
if res.failed:
print('Benchmark "{}" errored'.format(test_name))
bench_data['result'] = 'Unconfigured'
bench_data['error'] = res.error
print(bench_data['error'])
error_execution += 1
continue
bench_data['issues'] = res.issues
if not res.issues:
if (not expected_data['has_bug']) or expected_data['has_bug'] == 'benign':
print("No problems found and none expected")
bench_data['result'] = 'True Negative'
expected += 1
continue
else:
print("No problems found when issues were expected")
bench_data['result'] = 'False Negative'
error_execution += 1
continue
else:
if not expected_data['has_bug']:
print("Found a problem where none was expected")
bench_data['result'] = 'False Positive'
error_execution += 1
elif expected_data['has_bug'] == 'benign':
print("Found a benign problem")
bench_data['result'] = 'Benign'
expected += 1
continue
# The test has a bug, and analysis terminated normally
# finding some sort of problem. Did we detect the right problem?
expected_issues = expected_data.get('issues', [])
if len(expected_issues) != len(res.issues):
print("Expecting to find {} issue(s), got {}"
.format(len(expected_issues), len(res.issues)))
bench_data['result'] = 'Wrong Vulnerability'
error_execution += 1
pp.pprint(res.issues)
print("=" * 30)
continue
unfound_issues = res.compare_issues(test_name, expected_issues)
benchmark_success = unfound_issues == 0
bench_data['benchmark_success'] = benchmark_success
bench_data['result'] = 'True Positive'
if benchmark_success:
expected += 1
print('Benchmark "{}" checks out'.format(test_name))
if verbose:
for num, issue in enumerate(res.issues):
print(" Issue {1}. {2} {0[title]} "
"at address {0[address]}:\n\t{0[code]}"
.format(issue, bench_data['bug_type'], num))
print('-' * 40)
print("\nSummary: {} benchmarks; {} expected results, {} unconfigured, {} aborted abnormally, "
"{} unexpected results, {} timed out, {} ignored.\n"
.format(benchmarks, expected, unconfigured, invalid_execution, error_execution,
timed_out, ignored_benchmarks))
total_time_str = secs_to_human(total_time)
out_data['total_time'] = total_time
out_data['total_time_str'] = secs_to_human(total_time)
print("Total elapsed execution time: {}".format(total_time_str))
for field in """expected unconfigured invalid_execution error_execution
timed_out ignored_benchmarks""".split():
out_data[field] = locals()[field]
out_data['total_time'] = total_time
out_data['benchmark_count'] = benchmarks
benchdir = code_root_dir.parent / 'benchdata' / suite
os.makedirs(benchdir, exist_ok=True)
with open(benchdir / (analyser.get_name() + '.yaml'), 'w') as fp:
yaml.dump(out_data, fp)
| 5,350,411 |
def Eip1(name, ospaces, index_key=None):
"""
Return the tensor representation of a Fermion ionization
name (string): name of the tensor
ospaces (list): list of occupied spaces
"""
terms = []
for os in ospaces:
i = Idx(0, os)
sums = [Sigma(i)]
tensors = [Tensor([i], name)]
operators = [FOperator(i, False)]
e1 = Term(1, sums, tensors, operators, [], index_key=index_key)
terms.append(e1)
return Expression(terms)
| 5,350,412 |
def load_rtma_data(rtma_data, bbox):
"""
Load relevant RTMA fields and return them
:param rtma_data: a dictionary mapping variable names to local paths
:param bbox: the bounding box of the data
:return: a tuple containing td, t2, rh, precipa, hgt, lats, lons
"""
gf = GribFile(rtma_data['temp'])[1]
lats, lons = gf.latlons()
# bbox format: minlat, minlon, maxlat, maxlon
i1, i2, j1, j2 = find_region_indices(lats, lons, bbox[0], bbox[2], bbox[1], bbox[3])
t2 = np.ma.array(gf.values())[i1:i2,j1:j2] # temperature at 2m in K
td = np.ma.array(GribFile(rtma_data['td'])[1].values())[i1:i2,j1:j2] # dew point in K
precipa = np.ma.array(GribFile(rtma_data['precipa'])[1].values())[i1:i2,j1:j2] # precipitation
hgt = np.ma.array(GribFile('static/ds.terrainh.bin')[1].values())[i1:i2,j1:j2]
logging.info('t2 min %s max %s' % (np.min(t2),np.max(t2)))
logging.info('td min %s max %s' % (np.min(td),np.max(td)))
logging.info('precipa min %s max %s' % (np.min(precipa),np.max(precipa)))
logging.info('hgt min %s max %s' % (np.min(hgt),np.max(hgt)))
# compute relative humidity
rh = 100*np.exp(17.625*243.04*(td - t2) / (243.04 + t2 - 273.15) / (243.0 + td - 273.15))
return td, t2, rh, precipa, hgt, lats[i1:i2,j1:j2], lons[i1:i2,j1:j2]
| 5,350,413 |
def print_message(message):
"""
Log the given message to stdout on any
modules that are being used for the current test run.
"""
for rest_endpoint in _get_rest_endpoints():
thread = pool.apply_async(
rest_endpoint.log_message, ({"message": "PYTEST: " + message},)
)
try:
thread.get(print_message_timeout)
except TimeoutError:
print("PYTEST: timeout logging to " + str(rest_endpoint))
# swallow this exception. logs are allowed to fail (especially if we're testing disconnection scenarios)
| 5,350,414 |
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"),
six.text_type(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
# NOTE(matiu): With cells, it's possible to re-raise remote, remote
# exceptions. Lets turn it back into the original exception type.
cls_name = str(failure.__class__.__name__)
mod_name = str(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
| 5,350,415 |
def test_atomic_time_enumeration_nistxml_sv_iv_atomic_time_enumeration_1_5(mode, save_output, output_format):
"""
Type atomic/time is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/time/Schema+Instance/NISTSchema-SV-IV-atomic-time-enumeration-1.xsd",
instance="nistData/atomic/time/Schema+Instance/NISTXML-SV-IV-atomic-time-enumeration-1-5.xml",
class_name="NistschemaSvIvAtomicTimeEnumeration1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,350,416 |
def combine_arg_list_opts(opt_args):
"""Helper for processing arguments like impalad_args. The input is a list of strings,
each of which is the string passed into one instance of the argument, e.g. for
--impalad_args="-foo -bar" --impalad_args="-baz", the input to this function is
["-foo -bar", "-baz"]. This function combines the argument lists by tokenised each
string into separate arguments, if needed, e.g. to produce the output
["-foo", "-bar", "-baz"]"""
return list(itertools.chain(*[shlex.split(arg) for arg in opt_args]))
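# Runnable check of the example given in the docstring above (shlex and itertools are
# assumed to be imported at module level in the original file; imported here so the
# sketch stands alone):
import itertools
import shlex
assert combine_arg_list_opts(["-foo -bar", "-baz"]) == ["-foo", "-bar", "-baz"]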
| 5,350,417 |
def in_data():
"""Na funçao `in_data` é tratado os dados da matriz lida do arquivo txt."""
points = {}
i, j = map(int, file.readline().split(' '))
for l in range(i):
line = file.readline().split(' ')
if len(line)==j:
for colun in range(len(line)):
if line[colun].find("\n")!= -1:
line[colun] = line[colun][-2]
if line[colun] not in '0' :
points[line[colun]] = (l, colun)
else:
raise ValueError('Inconsistent number of columns in line.')
return points
| 5,350,418 |
def doom_action_space_extended():
"""
This function assumes the following list of available buttons:
TURN_LEFT
TURN_RIGHT
MOVE_FORWARD
MOVE_BACKWARD
MOVE_LEFT
MOVE_RIGHT
ATTACK
"""
space = gym.spaces.Tuple((
Discrete(3), # noop, turn left, turn right
Discrete(3), # noop, forward, backward
Discrete(3), # noop, strafe left, strafe right
Discrete(2), # noop, attack
))
return space
| 5,350,419 |
def svn_auth_provider_invoke_first_credentials(*args):
"""
svn_auth_provider_invoke_first_credentials(svn_auth_provider_t _obj, void provider_baton, apr_hash_t parameters,
char realmstring, apr_pool_t pool) -> svn_error_t
"""
return _core.svn_auth_provider_invoke_first_credentials(*args)
| 5,350,420 |
def test_vstart_without_rmq_init(request, instance):
"""
Test error where volttron is started with message bus as rmq but without
any certs
:param request: pytest request object
:param instance: volttron instance for testing
"""
try:
assert instance.instance_name == os.path.basename(instance.volttron_home), \
"instance name doesn't match volttron_home basename"
os.rename(
os.path.join(instance.volttron_home, "certificates"),
os.path.join(instance.volttron_home, "certs_backup")
)
try:
instance.startup_platform(vip_address=get_rand_vip())
pytest.fail("Instance should not start without certs, but it does!")
except Exception as e:
assert str(e).startswith("Platform startup failed. Please check volttron.log")
assert not (instance.is_running())
except Exception as e:
pytest.fail("Test failed with exception: {}".format(e))
| 5,350,421 |
def test_led_state() -> None:
"""Test the state property of an LED."""
led = LED(0, MockLEDDriver())
led.state = True
assert led.state
| 5,350,422 |
def kill_process(device, process="tcpdump", pid=None, sync=True, port=None):
"""Kill any active process
:param device: lan or wan
:type device: Object
:param process: process to kill, defaults to tcpdump
:type process: String, Optional
:param pid: process id to kill, defaults to None
:type pid: String, Optional
:param sync: Marked False if sync should not be executed;defaults to True
:type sync: Boolean,optional
:param port: port number to kill
:type port: int
:return: Console output of sync sendline command after kill process
:rtype: string
"""
if pid:
device.sudo_sendline("kill %s" % pid)
elif port:
device.sudo_sendline(r"kill $(lsof -t -i:%s)" % str(port))
else:
device.sudo_sendline("killall %s" % process)
device.expect(device.prompt)
if sync:
device.sudo_sendline("sync")
retry_on_exception(device.expect, (device.prompt,), retries=5, tout=60)
return device.before
| 5,350,423 |
def get_sentence(soup, ets_series, cache, get_verb=False):
"""
Given an ETS example `ets_series`, find the corresponding fragment, and
retrieve the sentence corresponding to the ETS example.
"""
frg = load_fragment(soup, ets_series.text_segment_id, cache)
sentence = frg.find('s', {'n': ets_series.sentence_number})
if get_verb:
tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True)
# Offset starts from 1
verb = raw_tokens[ets_series['word_offset'] - 1].lower()
return tokenized, raw_tokens, verb
tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True)
return tokenized, raw_tokens
| 5,350,424 |
def PreAuiNotebook(*args, **kwargs):
"""PreAuiNotebook() -> AuiNotebook"""
val = _aui.new_PreAuiNotebook(*args, **kwargs)
val._setOORInfo(val)
return val
| 5,350,425 |
def get_nsx_security_group_id(session, cluster, neutron_id):
"""Return the NSX sec profile uuid for a given neutron sec group.
First, look up the Neutron database. If not found, execute
a query on NSX platform as the mapping might be missing.
NOTE: Security groups are called 'security profiles' on the NSX backend.
"""
nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
if not nsx_id:
# Find security profile on backend.
# This is a rather expensive query, but it won't be executed
# more than once for each security group in Neutron's lifetime
nsx_sec_profiles = secgrouplib.query_security_profiles(
cluster, '*',
filters={'tag': neutron_id,
'tag_scope': 'q_sec_group_id'})
# Only one result expected
# NOTE(salv-orlando): Not handling the case where more than one
# security profile is found with the same neutron port tag
if not nsx_sec_profiles:
LOG.warn(_("Unable to find NSX security profile for Neutron "
"security group %s"), neutron_id)
return
elif len(nsx_sec_profiles) > 1:
LOG.warn(_("Multiple NSX security profiles found for Neutron "
"security group %s"), neutron_id)
nsx_sec_profile = nsx_sec_profiles[0]
nsx_id = nsx_sec_profile['uuid']
with session.begin(subtransactions=True):
# Create DB mapping
nsx_db.add_neutron_nsx_security_group_mapping(
session, neutron_id, nsx_id)
return nsx_id
| 5,350,426 |
def customiseGlobalTagForOnlineBeamSpot(process):
"""Customisation of GlobalTag for Online BeamSpot
- edits the GlobalTag ESSource to load the tags used to produce the HLT beamspot
- these tags are not available in the Offline GT, which is the GT presently used in HLT+RECO tests
- not loading these tags (i.e. not using this customisation) does not result in a runtime error,
but it leads to an HLT beamspot different to the one obtained when running HLT alone
"""
if hasattr(process, 'GlobalTag'):
if not hasattr(process.GlobalTag, 'toGet'):
process.GlobalTag.toGet = cms.VPSet()
process.GlobalTag.toGet += [
cms.PSet(
record = cms.string('BeamSpotOnlineLegacyObjectsRcd'),
tag = cms.string('BeamSpotOnlineLegacy')
),
cms.PSet(
record = cms.string('BeamSpotOnlineHLTObjectsRcd'),
tag = cms.string('BeamSpotOnlineHLT')
)
]
return process
| 5,350,427 |
def path_to_filename(username, path_to_file):
""" Converts a path formated as path/to/file.txt to a filename, ie. path_to_file.txt """
filename = '{}_{}'.format(username, path_to_file)
filename = filename.replace('/','_')
print(filename)
return filename
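# Example (pure string manipulation; note the function also prints the result):
assert path_to_filename("alice", "path/to/file.txt") == "alice_path_to_file.txt"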
| 5,350,428 |
def _convert_artist_format(artists: List[str]) -> str:
"""Returns converted artist format"""
formatted = ""
for x in artists:
formatted += x + ", "
return formatted[:-2]
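# Example; the List annotation above assumes `from typing import List` at module level.
# The loop is equivalent to ", ".join(artists):
assert _convert_artist_format(["Miles Davis", "John Coltrane"]) == "Miles Davis, John Coltrane"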
| 5,350,429 |
def slerp(val, low, high):
"""
Spherical interpolation. val has a range of 0 to 1.
From Tom White 2016
:param val: interpolation mixture value
:param low: first latent vector
:param high: second latent vector
:return:
"""
if val <= 0:
return low
elif val >= 1:
return high
elif np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
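# Worked example (numpy is already required by the body above): interpolating halfway
# between two orthogonal unit vectors weights both by sin(pi/4)/sin(pi/2) ~= 0.7071.
import numpy as np
low = np.array([1.0, 0.0])
high = np.array([0.0, 1.0])
assert np.allclose(slerp(0.5, low, high), [np.sqrt(2) / 2, np.sqrt(2) / 2])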
| 5,350,430 |
async def verify_input_body_is_json(
request: web.Request, handler: Handler
) -> web.StreamResponse:
"""
Middleware to verify that input body is of json format
"""
if request.can_read_body:
try:
await request.json()
except json.decoder.JSONDecodeError:
raise web.HTTPBadRequest(reason="Malformed JSON.")
return await handler(request)
| 5,350,431 |
def svn_repos_get_logs4(*args):
"""
svn_repos_get_logs4(svn_repos_t repos, apr_array_header_t paths, svn_revnum_t start,
svn_revnum_t end, int limit, svn_boolean_t discover_changed_paths,
svn_boolean_t strict_node_history,
svn_boolean_t include_merged_revisions,
apr_array_header_t revprops,
svn_repos_authz_func_t authz_read_func,
svn_log_entry_receiver_t receiver, apr_pool_t pool) -> svn_error_t
"""
return _repos.svn_repos_get_logs4(*args)
| 5,350,432 |
def _setup_lte(hass, lte_config) -> None:
"""Set up Huawei LTE router."""
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
url = lte_config[CONF_URL]
username = lte_config[CONF_USERNAME]
password = lte_config[CONF_PASSWORD]
connection = AuthorizedConnection(
url,
username=username,
password=password,
)
client = Client(connection)
data = RouterData(client)
hass.data[DATA_KEY].data[url] = data
def cleanup(event):
"""Clean up resources."""
client.user.logout()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
| 5,350,433 |
def linreg_predict(model, X, v=False):
"""
Prediction with linear regression
yhat[i] = E[y|X[i, :]], model]
v[i] = Var[y|X[i, :], model]
"""
if 'preproc' in model:
X = preprocessor_apply_to_test(model['preproc'], X)
yhat = X.dot(model['w'])
return yhat
| 5,350,434 |
def create_db_with_tables():
"""Creates testing database if it doesn't exist, and then loads in
the Mosaiq mimic tables.
"""
mocks.check_create_test_db(database=DATABASE)
create_mimic_tables(DATABASE)
| 5,350,435 |
def parse_hostportstr(hostportstr):
""" Parse hostportstr like 'xxx.xxx.xxx.xxx:xxx'
"""
host = hostportstr.split(':')[0]
port = int(hostportstr.split(':')[1])
return host, port
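# Example:
assert parse_hostportstr("192.168.0.1:8080") == ("192.168.0.1", 8080)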
| 5,350,436 |
def sigmoid_grad_input(x_input, grad_output):
"""sigmoid nonlinearity gradient.
Calculate the partial derivative of the loss
with respect to the input of the layer
# Arguments
x_input: np.array of size `(n_objects, n_in)`
grad_output: np.array of size `(n_objects, n_in)`
dL / df
# Output
the partial derivative of the loss
with respect to the input of the function
np.array of size `(n_objects, n_in)`
dL / dh
"""
#################
### YOUR CODE ###
#################
output = []
for x in x_input:
one = (1/(1+np.exp(-x)))
two = (np.exp(-x)/(1+np.exp(-x)))
output.append(one*two)
output = np.asarray(output*grad_output)
return output
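# A vectorized equivalent of the loop above (hedged: same math, no per-row Python loop;
# relies only on numpy, which the body already uses):
#
#   s = 1.0 / (1.0 + np.exp(-x_input))
#   return s * (1.0 - s) * grad_output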
| 5,350,437 |
def test_406_gpload_external_schema_merge():
"""406 test gpload works with schema test and write as "Test" in config"""
TestBase.drop_tables()
schema = '"Test"'
f = open(TestBase.mkpath('query406.sql'), 'a')
f.write("\\! gpload -f "+TestBase.mkpath('config/config_file1')+'\n')
f.write("\\! psql -d reuse_gptest -c \"select count(*) from pg_tables where schemaname = 'test';\"")
f.close()
TestBase.write_config_file(externalSchema=schema, reuse_tables=True, mode='merge')
TestBase.write_config_file(config='config/config_file1',externalSchema=schema, reuse_tables=True, mode='merge')
| 5,350,438 |
def p(draw_p: Turtle) -> None:
"""Draws a flower."""
r: int = randint(0, 256)
g: int = randint(0, 256)
b: int = randint(0, 256)
x: float = random.uniform(-250, 250)
y: float = random.uniform(-250, 250)
draw_p.penup()
draw_p.goto(x, y)
draw_p.pendown() # begins drawing
i: int = 0
# creates seven petals
while i < 7:
draw_p.penup()
draw_p.goto(x, y)
draw_p.pendown() # begins drawing
draw_p.fillcolor(r, g, b)
draw_p.begin_fill()
count: int = 0
# draws one half of a petal
while count < 50:
draw_p.right(2)
draw_p.forward(1)
count += 1
count = 0
draw_p.left(99)
# draws the other half
while count < 50:
draw_p.right(2)
draw_p.backward(1)
count += 1
draw_p.end_fill()
draw_p.penup()
draw_p.goto(x, y)
draw_p.right(99)
draw_p.left(45)
i += 1
| 5,350,439 |
def f_is3byte(*args):
"""f_is3byte(flags_t F, void ?) -> bool"""
return _idaapi.f_is3byte(*args)
| 5,350,440 |
def make_desired_disp(vertices, DeformType = DispType.random, num_of_vertices = -1):
"""
DispType.random: Makes a random displacement field. The first 3 degrees of freedom are assumed to
be zero in order to fix rotation and translation of the lattice.
DispType.isotropic: Every point moves towards the origin by an amount proportional to its distance from the origin
"""
if(num_of_vertices < 1):
num_of_vertices = get_num_of_verts(vertices)
if(DeformType == DispType.random):
return normalizeVec(npr.rand(2*num_of_vertices))
elif(DeformType == DispType.isotropic):
return normalizeVec(vertices.flatten())
elif(DeformType == DispType.explicit_1):
return np.vstack ((np.array([[0.0, 0.0], [0, -2], [-1, -1]]), npr.rand(num_of_vertices - 3, 2))).flatten()
elif(DeformType == DispType.explicit_2):
return np.vstack ((np.array([[0.0, 0.0], [0, 0], [-0.5 + 1.5*np.sin(np.pi/6), 0.3 - 1.5*np.cos(np.pi/6)]]),
npr.rand(num_of_vertices - 3, 2))).flatten()
| 5,350,441 |
def unix_sort_ranks(corpus: set,
tmp_folder_path: str):
"""
Function that takes a corpus, sorts it with the unix sort -n command, and generates the global ranks
for each value in the corpus.
Parameters
----------
corpus: set
The corpus (all the unique values from every column)
tmp_folder_path: str
The path of the temporary folder that will serve as a cache for the run
Returns
-------
dict
The ranks in the form of k: value, v: the rank of the value
"""
unsorted_file_path = os.path.join(tmp_folder_path, 'unsorted_file.txt')
sorted_file_path = os.path.join(tmp_folder_path, 'sorted_file.txt')
with open(unsorted_file_path, 'w') as out:
for var in corpus:
print(str(var), file=out)
with open(sorted_file_path, 'w') as f:
if os.name == 'nt':
subprocess.call(['sort',
unsorted_file_path],
stdout=f)
else:
sort_env = os.environ.copy()
sort_env['LC_ALL'] = 'C'
subprocess.call(['sort', '-n',
unsorted_file_path],
stdout=f, env=sort_env)
rank = 1
ranks = []
with open(sorted_file_path, 'r') as f:
txt = f.read()
for var in txt.splitlines():
ranks.append((convert_data_type(var.replace('\n', '')), rank))
rank = rank + 1
return dict(ranks)
| 5,350,442 |
def test_invalid_input(invalid_array):
"""Validates taht array given is return an error for wrong input datatype."""
with pytest.raises(TypeError):
quickSort(invalid_array)
| 5,350,443 |
def do_filter():
"""Vapoursynth filtering"""
opstart_ep10 = 768
ncop = JPBD_NCOP.src_cut
ep10 = JPBD_10.src_cut
ncop = lvf.rfs(ncop, ep10[opstart_ep10:], [(0, 79), (1035, 1037)])
return ncop
| 5,350,444 |
def test_returns_less_than_expected_errors(configured_test_manager):
"""A function that doesn't return the same number of objects as specified in the stage outputs should throw an OutputSignatureError."""
@stage([], ["test1", "test2"])
def output_stage(record):
return "hello world"
record = Record(configured_test_manager, None)
with pytest.raises(OutputSignatureError):
output_stage(record)
| 5,350,445 |
async def test_emergency_ssl_certificate_when_invalid(hass, tmpdir, caplog):
"""Test http can startup with an emergency self signed cert when the current one is broken."""
cert_path, key_path = await hass.async_add_executor_job(
_setup_broken_ssl_pem_files, tmpdir
)
hass.config.safe_mode = True
assert (
await async_setup_component(
hass,
"http",
{
"http": {"ssl_certificate": cert_path, "ssl_key": key_path},
},
)
is True
)
await hass.async_start()
await hass.async_block_till_done()
assert (
"Home Assistant is running in safe mode with an emergency self signed ssl certificate because the configured SSL certificate was not usable"
in caplog.text
)
assert hass.http.site is not None
| 5,350,446 |
def test_ending():
"""Game ends"""
rv, out = getstatusoutput(f'{prg} Lia Humberto -s 1')
assert re.search("GAME OVER! Final Scores: {'Lia': 10, 'Humberto': 20}")
| 5,350,447 |
def main():
""" エントリポイント """
config.read(CONFIG_FILE)
if 'PHPSESSID' not in config['DEFAULT']:
phpsessid = get_phpsessionid()
if not phpsessid:
sys.stderr.write('PHPSESSIDの取得に失敗しました。\n')
sys.exit(1)
else:
phpsessid = config['DEFAULT']['PHPSESSID']
cookie['PHPSESSID'] = phpsessid
(img_num, sessid) = get_img_num_and_sessionid()
if not sessid:
sys.stderr.write('sessidの取得に失敗しました。\n'
'PHPSESSIDが無効になっている場合は削除して再実行してください。\n')
sys.exit(1)
config['DEFAULT']['sessid'] = sessid
new_file_name = get_new_file()
if not new_file_name:
sys.stderr.write('画像ファイルが見つかりませんでした。\n')
sys.exit(1)
if img_num == 3:
delete_image(3)
img_num -= 1
upload_file(new_file_name)
if img_num != 0:
activate_image(img_num + 1)
| 5,350,448 |
async def test_http_error401(aresponses, status):
"""Test HTTP 401 response handling."""
aresponses.add(
"example.com",
"/api/v1/smartmeter",
"GET",
aresponses.Response(text="Give me energy!", status=status),
)
async with aiohttp.ClientSession() as session:
client = P1Monitor(host="example.com", session=session)
with pytest.raises(P1MonitorConnectionError):
assert await client.request("test")
| 5,350,449 |
def xgb(validate = True):
"""
Load XGB language detection model.
Parameters
----------
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
LANGUAGE_DETECTION : malaya._models._sklearn_model.LANGUAGE_DETECTION class
"""
if validate:
check_file(PATH_LANG_DETECTION['xgb'], S3_PATH_LANG_DETECTION['xgb'])
else:
if not check_available(PATH_LANG_DETECTION['xgb']):
raise Exception(
'language-detection/xgb is not available, please `validate = True`'
)
try:
with open(PATH_LANG_DETECTION['xgb']['vector'], 'rb') as fopen:
vector = pickle.load(fopen)
with open(PATH_LANG_DETECTION['xgb']['model'], 'rb') as fopen:
model = pickle.load(fopen)
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('language-detection/xgb') and try again"
)
return LANGUAGE_DETECTION(model, lang_labels, vector, mode = 'xgb')
| 5,350,450 |
def get_current_data(csv_file):
"""
Gathers and returns a list of lists of current information based on hourly data from NOAA's National Data Buoy Center
archived data. Returned list format is [current depths, current speeds, current directions].
Input parameter is any CSV or text file with the same formatting as the NDBC website.
"""
current_speed = []
current_dir = []
with open(csv_file) as data_file:
reader = csv.reader(data_file, delimiter=' ')
next(reader) # skips header line of CSV file
next(reader)
for row in reader:
while '' in row:
row.remove('')
current_depth = float(row[5])
try:
current_current_speed = float(row[7])
except ValueError:
current_current_speed = np.nan
current_current_dir = 360 - int(row[6])
if math.isclose(current_current_speed, 99.):
current_current_speed = np.nan
if math.isclose(current_current_dir, -639) or current_current_dir == 'MM':
current_current_dir = np.nan
current_speed.append(float(current_current_speed))
current_dir.append(float(current_current_dir))
current_data = {'Current Speed': current_speed, 'Current Direction': current_dir}
current_data = pd.DataFrame(data=current_data)
return current_data, current_depth
| 5,350,451 |
def _get_batched_jittered_initial_points(
model: Model,
chains: int,
initvals: Optional[Union[StartDict, Sequence[Optional[StartDict]]]],
random_seed: int,
jitter: bool = True,
jitter_max_retries: int = 10,
) -> Union[np.ndarray, List[np.ndarray]]:
"""Get jittered initial point in format expected by NumPyro MCMC kernel
Returns
-------
out: list of ndarrays
list with one item per variable and number of chains as batch dimension.
Each item has shape `(chains, *var.shape)`
"""
random_seed = np.random.default_rng(random_seed).integers(2**30, size=chains)
assert len(random_seed) == chains
initial_points = _init_jitter(
model,
initvals,
seeds=random_seed,
jitter=jitter,
jitter_max_retries=jitter_max_retries,
)
initial_points = [list(initial_point.values()) for initial_point in initial_points]
if chains == 1:
initial_points = initial_points[0]
else:
initial_points = [np.stack(init_state) for init_state in zip(*initial_points)]
return initial_points
| 5,350,452 |
def load_schema(url, resolver=None, resolve_references=False,
resolve_local_refs=False):
"""
Load a schema from the given URL.
Parameters
----------
url : str
The path to the schema
resolver : callable, optional
A callback function used to map URIs to other URIs. The
callable must take a string and return a string or `None`.
This is useful, for example, when a remote resource has a
mirror on the local filesystem that you wish to use.
resolve_references : bool, optional
If `True`, resolve all `$ref` references.
resolve_local_refs : bool, optional
If `True`, resolve all `$ref` references that refer to other objects
within the same schema. This will automatically be handled when passing
`resolve_references=True`, but it may be desirable in some cases to
control local reference resolution separately.
This parameter is deprecated.
"""
if resolve_local_refs is True:
warnings.warn(
"The 'resolve_local_refs' parameter is deprecated.",
AsdfDeprecationWarning
)
if resolver is None:
# We can't just set this as the default in load_schema's definition
# because invoking get_default_resolver at import time leads to a circular import.
resolver = extension.get_default_resolver()
# We want to cache the work that went into constructing the schema, but returning
# the same object is treacherous, because users who mutate the result will not
# expect that they're changing the schema everywhere.
return copy.deepcopy(
_load_schema_cached(url, resolver, resolve_references, resolve_local_refs)
)
| 5,350,453 |
def fastaDecodeHeader(fastaHeader):
"""Decodes the fasta header
"""
return fastaHeader.split("|")
| 5,350,454 |
def run_sc_test (config) :
"""
Test model.
"""
"""Load problem."""
if not os.path.exists (config.probfn):
raise ValueError ("Problem file not found.")
else:
p = problem.load_problem (config.probfn)
"""Load testing data."""
xt = np.load (config.xtest)
"""Set up input for testing."""
config.SNR = np.inf if config.SNR == 'inf' else float (config.SNR)
data_set = DataSet.DataSet(config, p)
input_, label_ = (
train.setup_input_sc (config.test, p, xt.shape [1], None, False,
config.supp_prob, config.SNR,
config.magdist, **config.distargs))
"""Set up model."""
model = setup_model (config , A=p.A)
xhs_ = model.inference (input_, None)
"""Create session and initialize the graph."""
tfconfig = tf.compat.v1.ConfigProto (allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.compat.v1.Session (config=tfconfig) as sess:
# graph initialization
sess.run (tf.compat.v1.global_variables_initializer ())
# load model
model.load_trainable_variables (config.modelfn)
nmse_denom = np.sum (np.square (xt))
supp_gt = xt != 0
lnmse = []
lspar = []
lsperr = []
lflspo = []
lflsne = []
# test model
for xh_ in xhs_ :
xh = sess.run (xh_ , feed_dict={label_:xt})
# nmse:
loss = np.sum (np.square (xh - xt))
nmse_dB = 10.0 * np.log10 (loss / nmse_denom)
print (nmse_dB)
lnmse.append (nmse_dB)
supp = xh != 0.0
# intermediate sparsity
spar = np.sum (supp , axis=0)
lspar.append (spar)
# support error
sperr = np.logical_xor(supp, supp_gt)
lsperr.append (np.sum (sperr , axis=0))
# false positive
flspo = np.logical_and (supp , np.logical_not (supp_gt))
lflspo.append (np.sum (flspo , axis=0))
# false negative
flsne = np.logical_and (supp_gt , np.logical_not (supp))
lflsne.append (np.sum (flsne , axis=0))
res = dict (nmse=np.asarray (lnmse),
spar=np.asarray (lspar),
sperr=np.asarray (lsperr),
flspo=np.asarray (lflspo),
flsne=np.asarray (lflsne))
np.savez (config.resfn , **res)
# end of test
| 5,350,455 |
def setup_module(module):
""" setup any state specific to the execution of the given module."""
child = pexpect.spawn('mlsteam login --address {} --username superuser --data-port {}'.format(API_SERVER_ADDRESS, DATA_PORT))
child.expect ('password:')
child.sendline ('superuser')
child.expect(pexpect.EOF)
out=child.before
exp=re.findall(b"Login success", out)
assert exp==[b'Login success']
config_path = os.path.join(os.getenv('HOME'), '.mlsteam', 'cred')
with open(config_path, 'r') as cred:
data = json.load(cred)
headers.update({'Authorization': 'Bearer {}'.format(data['access_token'])})
system("mlsteam data mb bk/cifar10")
system("mlsteam data cp -r /workspace/cifar10/* bk/cifar10")
system("mlsteam data ls bk/cifar10")
| 5,350,456 |
def test_modulesSimpleFlow(env):
"""
This simple test ensures that we can load two modules on RLTest and their commands are properly accessible
@param env:
"""
checkSampleModules(env)
| 5,350,457 |
def tau_for_x(x, beta):
"""Rescales tau axis to x -1 ... 1"""
if x.min() < -1 or x.max() > 1:
raise ValueError("domain of x")
return .5 * beta * (x + 1)
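# Worked example: with beta=10 the endpoints x=-1 and x=1 map to tau=0 and tau=beta.
import numpy as np
assert np.allclose(tau_for_x(np.array([-1.0, 0.0, 1.0]), beta=10.0), [0.0, 5.0, 10.0])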
| 5,350,458 |
def _add_to_dataset(
client,
urls,
name,
external=False,
force=False,
overwrite=False,
create=False,
sources=(),
destination="",
ref=None,
):
"""Add data to a dataset."""
if not client.has_datasets_provenance():
raise errors.OperationError("Dataset provenance is not generated. Run `renku graph generate-dataset`.")
if len(urls) == 0:
raise errors.UsageError("No URL is specified")
if sources and len(urls) > 1:
raise errors.UsageError("Cannot use `--source` with multiple URLs.")
try:
with client.with_dataset_provenance(name=name, create=create) as dataset:
client.add_data_to_dataset(
dataset,
urls=urls,
external=external,
force=force,
overwrite=overwrite,
sources=sources,
destination=destination,
ref=ref,
)
client.update_datasets_provenance(dataset)
except errors.DatasetNotFound:
raise errors.DatasetNotFound(
message=f"Dataset `{name}` does not exist.\nUse `renku dataset create {name}` to create the dataset or "
f"retry with `--create` option for automatic dataset creation."
)
except (FileNotFoundError, git.exc.NoSuchPathError) as e:
message = "\n\t".join(urls)
raise errors.ParameterError(f"Could not find paths/URLs: \n\t{message}") from e
| 5,350,459 |
def test_for_404(api, json_response):
"""Test for 404 handling"""
httpretty.enable()
body = json.dumps({'message': 'not found'})
httpretty.register_uri(httpretty.POST, "https://api.rosette.com/rest/v1/info",
body=json_response, status=200, content_type="application/json")
httpretty.register_uri(httpretty.GET, "https://api.rosette.com/rest/v1/info",
body=body, status=404, content_type="application/json")
with pytest.raises(RosetteException) as e_rosette:
api.info()
assert e_rosette.value.status == 404
assert e_rosette.value.message == 'not found'
httpretty.disable()
httpretty.reset()
| 5,350,460 |
def load_env():
"""Get the path to the .env file and load it."""
env_file = os.path.join(os.path.dirname(__file__), os.pardir, ".env")
dotenv.read_dotenv(env_file)
| 5,350,461 |
def _generate_element(name: str,
text: Optional[str] = None,
attributes: Optional[Dict] = None) -> etree.Element:
"""
generate an ElementTree.Element object
:param name: namespace+tag_name of the element
:param text: Text of the element. Default is None
:param attributes: Attributes of the elements in form of a dict {"attribute_name": "attribute_content"}
:return: ElementTree.Element object
"""
et_element = etree.Element(name)
if text:
et_element.text = text
if attributes:
for key, value in attributes.items():
et_element.set(key, value)
return et_element
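# Usage sketch (hedged: assumes `etree` is xml.etree.ElementTree or lxml.etree, either
# of which provides the Element/.set API used above; the namespace URI is illustrative):
#
#   el = _generate_element("{http://example.com/ns}item",
#                          text="hello",
#                          attributes={"lang": "en"})
#   # el.tag == "{http://example.com/ns}item", el.text == "hello", el.get("lang") == "en"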
| 5,350,462 |
def CoA_Cropland_URL_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
for y in config['sector_levels']:
# at national level, remove the text string calling for
# state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("&state_alpha=__stateAlpha__", "")
if y == "ECONOMICS":
url = url.replace(
"AREA%20HARVESTED&statisticcat_desc=AREA%20IN%20"
"PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc="
"AREA%20BEARING%20%26%20NON-BEARING",
"AREA&statisticcat_desc=AREA%20OPERATED")
else:
url = url.replace("&commodity_desc=AG%20LAND&"
"commodity_desc=FARM%20OPERATIONS", "")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("__stateAlpha__", z)
if y == "ECONOMICS":
url = url.replace(
"AREA%20HARVESTED&statisticcat_desc=AREA%20IN%20"
"PRODUCTION&statisticcat_desc=TOTAL&"
"statisticcat_desc=AREA%20BEARING%20%26%20NON-BEARING",
"AREA&statisticcat_desc=AREA%20OPERATED")
else:
url = url.replace("&commodity_desc=AG%20LAND&commodity_"
"desc=FARM%20OPERATIONS", "")
urls.append(url)
return urls
| 5,350,463 |
def _centered_bias(logits_dimension, head_name=None):
"""Returns `logits`, optionally with centered bias applied.
Args:
logits_dimension: Last dimension of `logits`. Must be >= 1.
head_name: Optional name of the head.
Returns:
Centered bias `Variable`.
Raises:
ValueError: if `logits_dimension` is invalid.
"""
if (logits_dimension is None) or (logits_dimension < 1):
raise ValueError("Invalid logits_dimension %s." % logits_dimension)
centered_bias = variable_scope.get_variable(
name="centered_bias_weight",
shape=(logits_dimension,),
initializer=init_ops.zeros_initializer(),
trainable=True)
for dim in range(logits_dimension):
if head_name:
summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
centered_bias[dim])
else:
summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
return centered_bias
| 5,350,464 |
def get_columns(dataframe: pd.DataFrame,
columns: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
"""Get the column names, and can rename according to list"""
return dataframe[list(columns)].copy(True)
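# Example (hedged: pandas and the typing imports are assumed at module level, as the
# annotations above require):
#
#   df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
#   sub = get_columns(df, ["a", "c"])   # independent copy of df[["a", "c"]]
#   # Note: a plain string is iterated per character, e.g. "ab" selects columns "a" and "b".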
| 5,350,465 |
def get_vote_activity(session):
"""Create a plot showing the inline usage statistics."""
creation_date = func.date_trunc("day", Vote.created_at).label("creation_date")
votes = (
session.query(creation_date, func.count(Vote.id).label("count"))
.group_by(creation_date)
.order_by(creation_date)
.all()
)
total_votes = [("Total votes", q[0], q[1]) for q in votes]
# Grid style
plt.style.use("seaborn-whitegrid")
# Combine the results in a single dataframe and name the columns
dataframe = pandas.DataFrame(total_votes, columns=["type", "date", "votes"])
months = mdates.MonthLocator() # every month
months_fmt = mdates.DateFormatter("%Y-%m")
max_value = max([vote[2] for vote in total_votes])
magnitude = get_magnitude(max_value)
# Plot each result set
fig, ax = plt.subplots(figsize=(30, 15), dpi=120)
for key, group in dataframe.groupby(["type"]):
ax = group.plot(ax=ax, kind="bar", x="date", y="votes", label=key)
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(months_fmt)
ax.yaxis.set_ticks(np.arange(0, max_value, math.pow(10, magnitude - 1)))
image = image_from_figure(fig)
image.name = "vote_statistics.png"
return image
| 5,350,466 |
def notebook(ctx):
"""
Start IPython notebook server.
"""
with setenv(PYTHONPATH='{root}/core:{root}:{root}/tests'.format(root=ROOT_DIR),
JUPYTER_CONFIG_DIR='{root}/notebooks'.format(root=ROOT_DIR)):
os.chdir('notebooks')
# Need pty=True to let Ctrl-C kill the notebook server. Shrugs.
try:
run('jupyter nbextension enable --py widgetsnbextension')
run('jupyter notebook --ip=*', pty=True)
except KeyboardInterrupt:
pass
print("If notebook does not open on your chorme automagically, try adding this to your bash_profie")
print("export BROWSER=/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome")
print("*for MacOS and Chrome only")
| 5,350,467 |
def viz_graph(obj):
"""
Generate the visualization of the graph in JupyterLab
Arguments
-------
obj: list
a list of Python object that defines the nodes
Returns
-----
nx.DiGraph
"""
G = nx.DiGraph()
# instantiate objects
for o in obj:
for i in o['inputs']:
G.add_edge(i, o['id'])
return G
| 5,350,468 |
def zrand_convolve(labelgrid, neighbors='edges'):
"""
Calculates the avg and std z-Rand index using kernel over `labelgrid`
Kernel is determined by `neighbors`, which can include all entries with
touching edges (i.e., 4 neighbors) or corners (i.e., 8 neighbors).
Parameters
----------
labelgrid : (S, K, N) array_like
Array containing cluster labels for each of `N` samples, where `S` is mu
and `K` is K.
neighbors : str, optional
How many neighbors to consider when calculating Z-rand kernel. Must be
in ['edges', 'corners']. Default: 'edges'
Returns
-------
zrand_avg : (S, K) np.ndarray
Array containing average of the z-Rand index calculated using provided
neighbor kernel
zrand_std : (S, K) np.ndarray
Array containing standard deviation of the z-Rand index
"""
inds = cartesian([range(labelgrid.shape[0]), range(labelgrid.shape[1])])
zrand = np.empty(shape=labelgrid.shape[:-1] + (2,))
for x, y in inds:
ninds = get_neighbors(x, y, neighbors=neighbors, shape=labelgrid.shape)
zrand[x, y] = zrand_partitions(labelgrid[ninds].T)
return zrand[..., 0], zrand[..., 1]
| 5,350,469 |
def msg_receiver():
"""
Received-messages page
:return:
"""
return render_template('sysadmin/sysmsg/sys_msg_received.html', **locals())
| 5,350,470 |
def abs_timedelta(delta):
"""Returns an "absolute" value for a timedelta, always representing a
time distance."""
if delta.days < 0:
now = datetime.datetime.now()
return now - (now + delta)
return delta
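# Example (standard library only): a negative delta is flipped to the positive distance.
import datetime
assert abs_timedelta(datetime.timedelta(days=-2)) == datetime.timedelta(days=2)
assert abs_timedelta(datetime.timedelta(days=3)) == datetime.timedelta(days=3)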
| 5,350,471 |
def is_word_file(file):
"""
Check to see if the given file is a Word file.
@param file (str) The path of the file to check.
@return (bool) True if the file is a Word file, False if not.
"""
typ = subprocess.check_output(["file", file])
return ((b"Microsoft Office Word" in typ) or
(b"Word 2007+" in typ) or
(b"Microsoft OOXML" in typ))
| 5,350,472 |
def make_new_get_user_response(row):
""" Returns an object containing only what needs to be sent back to the user. """
return {
'userName': row['userName'],
'categories': row['categories'],
'imageName': row['imageName'],
'refToImage': row['refToImage'],
'imgDictByTag': row['imgDictByTag'],
'canView': row['canView']
}
| 5,350,473 |
def lock_retention_policy(bucket_name):
"""Locks the retention policy on a given bucket"""
# bucket_name = "my-bucket"
storage_client = storage.Client()
# get_bucket gets the current metageneration value for the bucket,
# required by lock_retention_policy.
bucket = storage_client.get_bucket(bucket_name)
# Warning: Once a retention policy is locked it cannot be unlocked
# and retention period can only be increased.
bucket.lock_retention_policy()
print("Retention policy for {} is now locked".format(bucket_name))
print(
"Retention policy effective as of {}".format(
bucket.retention_policy_effective_time
)
)
| 5,350,474 |
def upload_bus_data(number: int) -> dict:
"""Загружает данные по матерям из базы"""
logger.debug("Стартует upload_bus_data")
try:
query = ProfilsCows.select().where(
ProfilsCows.number == number
)
if query.exists():
bus = ProfilsCows.select().where(
ProfilsCows.number == number
).get()
res = {
"BM1818_mutter": bus.BM1818,
"BM1824_mutter": bus.BM1824,
"BM2113_mutter": bus.BM2113,
"CSRM60_mutter": bus.CSRM60,
"CSSM66_mutter": bus.CSSM66,
"CYP21_mutter": bus.CYP21,
"ETH10_mutter": bus.ETH10,
"ETH225_mutter": bus.ETH225,
"ETH3_mutter": bus.ETH3,
"ILSTS6_mutter": bus.ILSTS6,
"INRA023_mutter": bus.INRA023,
"RM067_mutter": bus.RM067,
"SPS115_mutter": bus.SPS115,
"TGLA122_mutter": bus.TGLA122,
"TGLA126_mutter": bus.TGLA126,
"TGLA227_mutter": bus.TGLA227,
"TGLA53_mutter": bus.TGLA53,
"MGTG4B_mutter": bus.MGTG4B,
"SPS113_mutter": bus.SPS113,
}
logger.debug("Конец upload_bus_data")
return res
else:
res = {
"BM1818_mutter": '-',
"BM1824_mutter": '-',
"BM2113_mutter": '-',
"CSRM60_mutter": '-',
"CSSM66_mutter": '-',
"CYP21_mutter": '-',
"ETH10_mutter": '-',
"ETH225_mutter": '-',
"ETH3_mutter": '-',
"ILSTS6_mutter": '-',
"INRA023_mutter": '-',
"RM067_mutter": '-',
"SPS115_mutter": '-',
"TGLA122_mutter": '-',
"TGLA126_mutter": '-',
"TGLA227_mutter": '-',
"TGLA53_mutter": '-',
"MGTG4B_mutter": '-',
"SPS113_mutter": '-',
}
logger.debug("Конец upload_bus_data")
return res
except Exception as e:
logger.error(e)
name = '\njob_db.py\nupload_bus_data\n'
QMessageBox.critical(
None,
'Ошибка ввода',
f'{answer_error()}{name}Подробности:\n {e}'
)
logger.debug("Конец upload_bus_data")
| 5,350,475 |
def is_feature_enabled():
"""
Helper to check Site Configuration for ENABLE_COURSE_ACCESS_GROUPS.
:return: bool
"""
is_enabled = bool(configuration_helpers.get_value('ENABLE_COURSE_ACCESS_GROUPS', default=False))
if is_enabled:
# Keep the line below in sync with `util.organizations_helpers.organizations_enabled`
if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
raise ConfigurationError(
                'The Course Access Groups feature is enabled but the Organizations App is not. '
'Please enable the feature flag `ORGANIZATIONS_APP` to fix this exception.'
)
return is_enabled
| 5,350,476 |
def solve_disp_eq(betbn, betbt, bet, Znak, c, It, Ia, nb, var):
"""
    Solve the dispersion equation.
    Znak = -1 for refraction
    Znak = 1 for reflection
"""
betb = sqrt(betbn ** 2. + betbt ** 2.)
gamb = 1. / sqrt(1. - betb ** 2.)
d = c * It / Ia
Ab = 1. + (nb ** 2. - 1.) * gamb ** 2. * (1. - betbn ** 2.)
Bb = d ** 2. * (1. - bet ** 2. - (nb ** 2. - 1.) * (gamb * (bet - betbn)) ** 2.)
Cb = (nb ** 2. - 1.) * gamb ** 2. * d * betbt * (2. - 2. * bet * betbn - d * betbt * (1. - bet ** 2.))
Qb = Ab - Bb - Cb
CHb = bet + (nb ** 2. - 1.) * gamb ** 2. * (bet - betbn) * (1. - betbt * d)
ZNb = 1. - bet ** 2. - (nb ** 2. - 1.) * (gamb * (bet - betbn)) ** 2.
    kbna = Ia * (CHb + Znak * sqrt(Qb)) / (c * ZNb)  # normal projection of the wave vector
    kbt = It  # tangential projection of the wave vector
    iQb = arctan(abs(kbt / kbna))
    wi = kbna * bet * c + Ia  # frequency of the refracted wave
    ci = wi * cos(iQb) / abs(kbna)  # speed of light in the medium
if var < 0:
iQb = -iQb
# k = kbna / cos(arctan(abs(kbt / kbna)))
# # ui=betb*c
# uit = betbt * c
# uin = betbn * c
# V = bet * c
# Ai = -1 / pow(c, 2.) - (pow(nb, 2.) - 1.) * pow(1. - uin / V, 2.) / pow(c, 2.) / (1. - pow(betb, 2.))
# Bi = -2 * (pow(nb, 2.) - 1.) * (-kbt * uit + Ia * uin / V) * (1. - uin / V) / (pow(c, 2.) * (1. - pow(betb, 2.)))
# Ci = pow(k, 2.) - (pow(nb, 2.) - 1.) * pow(-kbt * uit + Ia * uin / V, 2.) / pow(c, 2.) / (1. - pow(betb, 2.))
# '''print "Ai = %s"%Ai
# print "Bi = %s"%Bi
# print "Ci = %s"%Ci'''
# # wi=(-Bi-sqrt(pow(Bi,2)-4*Ai*Ci))/(2*Ai)
# dispeq = Ai * wi * wi + Bi * wi + Ci
# '''print "dispeq = %s"%(dispeq,)
# print "wi= %s"%wi'''
return (kbna, kbt, iQb, wi, ci)
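
# Hypothetical call (every number below is illustrative, not taken from the source): a refracted
# wave (Znak=-1) in a medium with refractive index nb=1.5, zero tangential wavenumber It, incident
# frequency Ia, and small boundary/observer velocities. Assumes numpy's sqrt/arctan/cos are in
# scope, as the function body itself does.
kbna, kbt, iQb, wi, ci = solve_disp_eq(
    betbn=0.1, betbt=0.0, bet=0.2, Znak=-1,
    c=3.0e8, It=0.0, Ia=1.0e15, nb=1.5, var=1,
)
print(wi, ci)  # frequency and phase speed of the refracted wave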
| 5,350,477 |
def phistogram(view, a, bins=10, rng=None, normed=False):
"""Compute the histogram of a remote array a.
Parameters
----------
view
IPython DirectView instance
a : str
String name of the remote array
bins : int
Number of histogram bins
rng : (float, float)
Tuple of min, max of the range to histogram
normed : boolean
Should the histogram counts be normalized to 1
"""
nengines = len(view.targets)
# view.push(dict(bins=bins, rng=rng))
with view.sync_imports():
import numpy
rets = view.apply_sync(lambda a, b, rng: numpy.histogram(a,b,rng), Reference(a), bins, rng)
hists = [ r[0] for r in rets ]
lower_edges = [ r[1] for r in rets ]
    # view.execute('hist, lower_edges = numpy.histogram(%s, bins, rng)' % a)
    lower_edges = lower_edges[0]  # bin edges from the first engine (identical across engines when rng is given)
hist_array = numpy.array(hists).reshape(nengines, -1)
# hist_array.shape = (nengines,-1)
total_hist = numpy.sum(hist_array, 0)
if normed:
total_hist = total_hist/numpy.sum(total_hist,dtype=float)
return total_hist, lower_edges
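
# Hypothetical usage sketch (assumes a running ipyparallel cluster; the remote name 'a' is illustrative):
import numpy
from ipyparallel import Client
rc = Client()
view = rc[:]
view.scatter('a', numpy.random.randn(100000))  # distribute the array under the remote name 'a'
hist, edges = phistogram(view, 'a', bins=20, rng=(-4.0, 4.0), normed=True)
print(hist.sum())  # ~1.0 because normed=True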
| 5,350,478 |
async def test_reauth(opp):
"""Test we get the form."""
await setup.async_setup_component(opp, "persistent_notification", {})
mock_config = MockConfigEntry(
domain=DOMAIN,
unique_id="test-mac",
data={
"host": "1.1.1.1",
"hostname": "test-mac",
"ssl_certificate": "test-cert.pem",
"ssl_key": "test-key.pem",
},
title="shc012345",
)
mock_config.add_to_opp(opp)
result = await opp.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_REAUTH},
data=mock_config.data,
)
assert result["type"] == "form"
assert result["step_id"] == "reauth_confirm"
with patch(
"boschshcpy.session.SHCSession.mdns_info",
return_value=SHCInformation,
), patch(
"boschshcpy.information.SHCInformation.name",
new_callable=PropertyMock,
return_value="shc012345",
), patch(
"boschshcpy.information.SHCInformation.unique_id",
new_callable=PropertyMock,
return_value="test-mac",
):
result2 = await opp.config_entries.flow.async_configure(
result["flow_id"],
{"host": "2.2.2.2"},
)
assert result2["type"] == "form"
assert result2["step_id"] == "credentials"
assert result2["errors"] == {}
with patch(
"boschshcpy.register_client.SHCRegisterClient.register",
return_value={
"token": "abc:123",
"cert": b"content_cert",
"key": b"content_key",
},
), patch("os.mkdir"), patch("builtins.open"), patch(
"boschshcpy.session.SHCSession.authenticate"
), patch(
"openpeerpower.components.bosch_shc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await opp.config_entries.flow.async_configure(
result2["flow_id"],
{"password": "test"},
)
await opp.async_block_till_done()
assert result3["type"] == "abort"
assert result3["reason"] == "reauth_successful"
assert mock_config.data["host"] == "2.2.2.2"
assert len(mock_setup_entry.mock_calls) == 1
| 5,350,479 |
def windowed(it: Iterable[_T], size: int) -> Iterator[tuple[_T, ...]]:
"""Retrieve overlapped windows from iterable.
>>> [*windowed(range(5), 3)]
[(0, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
return zip(*(islice(it_, start, None)
for start, it_ in enumerate(tee(it, size))))
| 5,350,480 |
def minmax(data):
"""Solution to exercise R-1.3.
Takes a sequence of one or more numbers, and returns the smallest and
largest numbers, in the form of a tuple of length two. Do not use the
built-in functions min or max in implementing the solution.
"""
min_idx = 0
max_idx = 0
for idx, num in enumerate(data):
if num > data[max_idx]:
max_idx = idx
if num < data[min_idx]:
min_idx = idx
return (data[min_idx], data[max_idx])
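
# Quick usage check (illustrative values):
print(minmax([3, 1, 4, 1, 5, 9, 2, 6]))  # -> (1, 9)
print(minmax((7,)))                      # -> (7, 7) for a single-element sequence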
| 5,350,481 |
def server_func(port):
"""
    Server side.
"""
    # 1. Create the socket object
server = socket.socket()
    # 2. Bind the IP address and port
server.bind(("127.0.0.1", port))
    # 3. Listen for incoming client connections
server.listen(10)
print("服务端已经启动%s端口......" % port)
    # 4. Accept a client connection
sock_obj, address = server.accept()
sock_obj.settimeout(3)
print("客户端:%s,超时时间:%s" % (address, sock_obj.gettimeout()))
while True:
try:
            # 5. Receive the message sent by the client
recv_data = sock_obj.recv(1024).decode("utf-8")
print("客户端端 -> 服务端: %s" % recv_data)
if recv_data == "quit":
break
            # 6. Send a reply back to the client
send_data = "received[%s]" % recv_data
sock_obj.send(send_data.encode("utf-8"))
print("服务端 -> 客户端: %s" % send_data)
except Exception as excep:
print("error: ", excep)
    # 7. Close the socket objects
sock_obj.close()
server.close()
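
# Hypothetical client counterpart (not part of the source): it illustrates the protocol above,
# where the server replies "received[<msg>]" and stops once it receives "quit".
import socket

def client_func(port):
    client = socket.socket()
    client.connect(("127.0.0.1", port))
    client.send("hello".encode("utf-8"))
    print(client.recv(1024).decode("utf-8"))  # -> received[hello]
    client.send("quit".encode("utf-8"))       # the server breaks out of its loop on "quit"
    client.close()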
| 5,350,482 |
def sample_discreate(prob, n_samples):
"""根据类先验分布对标签值进行采样
M = sample_discreate(prob, n_samples)
Input:
prob: 类先验分布 shape=(n_classes,)
n_samples: 需要采样的数量 shape = (n_samples,)
Output:
M: 采样得到的样本类别 shape = (n_samples,)
例子:
sample_discreate([0.8,0.2],n_samples)
从类别[0,1]中采样产生n_samples个样本
其中采样得到0的概率为0.8,得到1的概率为0.2.
"""
np.random.seed(1) # 使每一次生成的随机数一样
n = prob.size # 类别的数量
R = np.random.rand(n_samples) # 生成服从均匀分布的随机数
M = np.zeros(n_samples) # 初始化最终结果
cumprob = np.cumsum(prob) # 累积概率分布
if n < n_samples: # 如果采样的样本数量大于类别数量
for i in range(n-1):
M = M + np.array(R > cumprob[i])
else: # 如果采样的样本数量小于类别数量
cumprob2 = cumprob[:-1]
for i in range(n_samples):
M[i] = np.sum(R[i] > cumprob2)
return M
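
# Runnable usage sketch (illustrative prior). Note that `prob` must be a numpy array, since the
# function relies on `prob.size`; the plain list shown in the docstring example would need wrapping.
prior = np.array([0.8, 0.2])
labels = sample_discreate(prior, 10)
print(labels)  # ten draws (as floats) from {0, 1}, with 0 roughly four times as likely as 1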
| 5,350,483 |
def get_catalyst_pmids(first, middle, last, email, affiliation=None):
"""
    Given an author's identifiers and affiliation information, call the catalyst service
    to retrieve PMIDs for the author and return them as a list
:param first: author first name
:param middle: author middle name
:param last: author last name
:param email: author email(s) as a list
:param affiliation: author affiliation as a list
:return: list of pmids identified by the catalyst service that have a high probability of being written by the
author
"""
from xml.dom.minidom import parseString # tools for handling XML in python
result = get_catalyst_pmids_xml(first, middle, last, email, affiliation)
dom = parseString(result) # create a document Object Model (DOM) from the Harvard Catalyst result
return [node.childNodes[0].data for node in dom.getElementsByTagName('PMID')]
| 5,350,484 |
def validar(request, op):
"""
    Checks consistency based on a log file
    """
    lista_datas = []
    # log file used for the consistency check
    with open(settings.BASE_DIR + "/log.txt", "r+") as fileobj:
        for line in fileobj: # read each line of the file
            if "inicio" in line:
                lista_datas.append(line[:8]) # add to the list of dates with problems
            elif "fim" in line:
                lista_datas.remove(line[:8]) # remove from the list of dates with problems
    if(len(lista_datas) != 0):
        # message to be displayed on screen
        messages.error(request, " operacões inconsistentes refazer dia ou dias -- " + str(lista_datas))
    else:
        # message to be displayed on screen
        messages.success(request, " operacões consistentes")
    if(op == 2): # handle the return value of the execute/generate button
if(len(lista_datas) != 0):
return False
return True
| 5,350,485 |
def upload_categories_to_fyle(workspace_id):
"""
Upload categories to Fyle
"""
try:
fyle_credentials: FyleCredential = FyleCredential.objects.get(workspace_id=workspace_id)
xero_credentials: XeroCredentials = XeroCredentials.objects.get(workspace_id=workspace_id)
fyle_connection = FyleConnector(
refresh_token=fyle_credentials.refresh_token
)
platform = PlatformConnector(fyle_credentials)
xero_connection = XeroConnector(
credentials_object=xero_credentials,
workspace_id=workspace_id
)
platform.categories.sync()
xero_connection.sync_accounts()
xero_attributes = DestinationAttribute.objects.filter(attribute_type='ACCOUNT', workspace_id=workspace_id)
xero_attributes = remove_duplicates(xero_attributes)
fyle_payload: List[Dict] = create_fyle_categories_payload(xero_attributes, workspace_id)
if fyle_payload:
fyle_connection.connection.Categories.post(fyle_payload)
platform.categories.sync()
return xero_attributes
except XeroCredentials.DoesNotExist:
logger.error(
'Xero Credentials not found for workspace_id %s',
workspace_id,
)
| 5,350,486 |
def georegister_px_df(df, im_fname=None, affine_obj=None, crs=None,
geom_col='geometry', precision=None):
"""Convert a dataframe of geometries in pixel coordinates to a geo CRS.
Arguments
---------
df : :class:`pandas.DataFrame`
A :class:`pandas.DataFrame` with polygons in a column named
``"geometry"``.
im_fname : str, optional
A filename or :class:`rasterio.DatasetReader` object containing an
image that has the same bounds as the pixel coordinates in `df`. If
not provided, `affine_obj` and `crs` must both be provided.
affine_obj : `list` or :class:`affine.Affine`, optional
An affine transformation to apply to `geom` in the form of an
``[a, b, d, e, xoff, yoff]`` list or an :class:`affine.Affine` object.
Required if not using `raster_src`.
crs : dict, optional
The coordinate reference system for the output GeoDataFrame. Required
if not providing a raster image to extract the information from. Format
        should be ``{'init': 'epsg:xxxx'}``, replacing xxxx with the EPSG code.
geom_col : str, optional
The column containing geometry in `df`. If not provided, defaults to
``"geometry"``.
precision : int, optional
The decimal precision for output geometries. If not provided, the
vertex locations won't be rounded.
"""
if im_fname is not None:
affine_obj = rasterio.open(im_fname).transform
crs = rasterio.open(im_fname).crs
else:
if not affine_obj or not crs:
raise ValueError(
'If an image path is not provided, ' +
'affine_obj and crs must be.')
tmp_df = affine_transform_gdf(df, affine_obj, geom_col=geom_col,
precision=precision)
return gpd.GeoDataFrame(tmp_df, crs=crs)
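
# Minimal usage sketch (the affine transform, CRS, and `df` below are hypothetical; `df` is assumed
# to be a DataFrame holding pixel-space polygons in its 'geometry' column):
from affine import Affine
gdf = georegister_px_df(
    df,
    affine_obj=Affine(0.5, 0.0, 733601.0, 0.0, -0.5, 3725139.0),
    crs={'init': 'epsg:32611'},
    precision=2,
)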
| 5,350,487 |
def load_ui_type(ui_file):
"""
Pyside "load_ui_type" command like PyQt4 has one, so we have to convert the
ui file to py code in-memory first and then execute it in a special frame
to retrieve the form_class.
"""
parsed = xml.parse(ui_file)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(ui_file, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc, frame)
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s' % form_class]
base_class = eval('QtWidgets.%s' % widget_class)
return base_class, form_class
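
# Typical usage pattern (the .ui file name and dialog class are hypothetical):
base_class, form_class = load_ui_type('my_dialog.ui')

class MyDialog(base_class, form_class):
    def __init__(self, parent=None):
        super(MyDialog, self).__init__(parent)
        self.setupUi(self)  # provided by the compiled form class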
| 5,350,488 |
def how_did_I_do(MLP, df, samples, expected):
"""Simple report of expected inputs versus actual outputs."""
predictions = MLP.predict(df[samples].to_list())
_df = pd.DataFrame({"Expected": df[expected], "Predicted": predictions})
_df["Correct"] = _df["Expected"] == _df["Predicted"]
print(f'The network got {sum(_df["Correct"])} out of {len(_df)} correct.')
return _df
| 5,350,489 |
def all_bootstrap_os():
"""Return a list of all the OS that can be used to bootstrap Spack"""
return list(data()['images'])
| 5,350,490 |
def coords_to_volume(coords: np.ndarray, v_size: int,
noise_treatment: bool = False) -> np.ndarray:
"""Converts coordinates to binary voxels.""" # Input is centered on [0,0,0].
return weights_to_volume(coords=coords, weights=1, v_size=v_size, noise_treatment=noise_treatment)
| 5,350,491 |
def logo(symbol, external=False, vprint=False):
""":return: Google APIs link to the logo for the requested ticker.
:param symbol: The ticker or symbol of the stock you would like to request.
:type symbol: string, required
"""
instance = iexCommon('stock', symbol, 'logo', external=external)
return instance.execute()
| 5,350,492 |
def get_dependency_node(element):
""" Returns a Maya MFnDependencyNode from the given element
:param element: Maya node to return a dependency node class object
:type element: string
"""
    # add the element to a Maya selection list
m_selectin_list = OpenMaya.MSelectionList()
m_selectin_list.add(element)
# creates an MObject
m_object = OpenMaya.MObject()
# gets the MObject from the list
m_selectin_list.getDependNode(0, m_object)
return OpenMaya.MFnDependencyNode(m_object)
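
# Usage sketch (requires a running Maya session; 'persp' is one of Maya's default camera transforms):
dep_node = get_dependency_node('persp')
print(dep_node.name())  # -> 'persp'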
| 5,350,493 |
def create_lineal_data(slope=1, bias=0, spread=0.25, data_size=50):
"""
    Helper function to create linear data.
    :param slope: slope of the linear function.
    :param bias: bias of the linear function.
:param spread: spread of the normal distribution.
:param data_size: number of samples to generate.
:return x, y: data and labels
"""
x = np.linspace(0, 1, data_size)
y = x * slope + bias + np.random.normal(scale=spread, size=x.shape)
return x, y
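
# Illustrative usage: noisy samples scattered around y = 2x + 1.
x, y = create_lineal_data(slope=2, bias=1, spread=0.1, data_size=100)
print(x.shape, y.shape)  # -> (100,) (100,)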
| 5,350,494 |
def parse_station_resp(fid):
"""
Gather information from a single station IRIS response file
*fid*. Return the information as a :class:`RespMap`.
"""
resp_map = RespMap()
# sanity check initialization
network = None
stn = None
location = None
# skip initial header comment block
skip_block_header_comments(fid)
while True:
block_header, block, eof = parse_block(fid)
# sanity check (same network, station, and location across recorded blocks)
network = check(block_header, 'Network', network)
stn = check(block_header, 'Station', stn)
location = check(block_header, 'Location', location)
# store block information
interval = DateTimeInterval.closed_open(block_header['Start_date'],
block_header['End_date'])
resp_map.setdefault(interval, {})[block_header['Channel']] = block
if eof:
break
resp_map.network = network
resp_map.stn = stn
resp_map.location = location
return resp_map
| 5,350,495 |
def plot_pol(image, figsize=(8,8), print_stats=True, scaled=True, evpa_ticks=True):
"""Mimics the plot_pol.py script in ipole/scripts"""
fig, ax = plt.subplots(2, 2, figsize=figsize)
# Total intensity
plot_I(ax[0,0], image, xlabel=False)
# Quiver on intensity
if evpa_ticks:
plot_evpa_ticks(ax[0,0], image, n_evpa=30)
# Linear polarization fraction
plot_lpfrac(ax[0,1], image, xlabel=False, ylabel=False)
# evpa
plot_evpa_rainbow(ax[1,0], image)
# circular polarization fraction
plot_cpfrac(ax[1,1], image, ylabel=False)
if print_stats:
# print image-average quantities to command line
print("Flux [Jy]: {0:g} ({1:g} unpol)".format(image.flux(), image.flux_unpol()))
print("I,Q,U,V [Jy]: {0:g} {1:g} {2:g} {3:g}".format(image.Itot(), image.Qtot(),
image.Utot(), image.Vtot()))
print("LP [%]: {0:g}".format(100.*image.lpfrac_int()))
print("CP [%]: {0:g}".format(100.*image.cpfrac_int()))
print("EVPA [deg]: {0:g}".format(image.evpa_int()))
return fig
| 5,350,496 |
def dbrg(ds, T, r):
"""
Segmentation by density-based region growing (DBRG).
Parameters
----------
ds : np.ndarray
The mask image.
T : float
Initial mask threshold.
r : int
Density connectivity search radius.
"""
M = _generate_init_mask(ds, T)
D = _density_distribution(len(ds), M, r)
S = _generate_seeds(D)
# make sure at least one seed exists
assert(S.any())
# unlabeled
R = np.zeros_like(M, dtype=np.uint32)
V = np.full_like(M, np.NINF, dtype=np.float32)
logger.debug("initial labeling by density")
# label by density map
for i, d in enumerate(D):
R[(d > V) & S] = i+1
V[(d > V) & S] = d[(d > V) & S]
logger.debug("density conncetivity")
# label by density connectivity
n, m, l = M.shape
v = np.empty(len(D)+1, dtype=np.float32)
    ps = [] # coordinates of pixels left unlabeled (to be filled by nearest neighbor)
for z in range(0, n):
for y in range(0, m):
for x in range(0, l):
if R[z, y, x] > 0:
continue
pu = min(x+1, l-1)
pd = max(x-1, 0)
pr = min(y+1, m-1)
pl = max(y-1, 0)
pt = min(z+1, n-1)
pb = max(z-1, 0)
v.fill(0)
for zz in range(pb, pt+1):
for yy in range(pl, pr+1):
for xx in range(pd, pu+1):
                            if ((xx-x)**2 + (yy-y)**2 + (zz-z)**2 <= r*r):
v[R[zz, yy, xx]] += 1
R[z, y, x] = v.argmax()
if R[z, y, x] == 0:
ps.append((z, y, x))
logger.debug("nearest neighbor")
# label by nearest neighbor
psv = [] # filled result
for z, y, x in ps:
r = 1
while True:
pu = min(x+1, l-1)
pd = max(x-1, 0)
pr = min(y+1, m-1)
pl = max(y-1, 0)
pt = min(z+1, n-1)
pb = max(z-1, 0)
v = []
for zz in range(pb, pt+1):
for yy in range(pl, pr+1):
for xx in range(pd, pu+1):
v.append((R[zz, yy, xx], (xx-x)**2 + (yy-y)**2 + (zz-z)**2))
if len(v) == 0:
r += 1
else:
v.sort(key=lambda p: p[1])
psv.append(v[0][0])
break
for (z, y, x), v in zip(ps, psv):
R[z, y, x] = v
# make sure each position is assigned a mask value
assert(np.all(R != 0))
return R
| 5,350,497 |
def convert_ion_balance_tables(run_file, output_file, elements):
"""
Convert ascii ion balance tables to hdf5.
Parameters
----------
run_file : string
Path to the input file ending in .run.
output_file : string
HDF5 output file name.
elements : list
List of elements to be converted.
Examples
--------
>>> from cloudy_grids import convert_ion_balance_tables
>>> convert_ion_balance_tables(
... "ion_balance/ion_balance.run", "ion_balance.h5", ["C", "O"])
"""
for element in elements:
_ion_balance_convert(run_file, output_file, element)
| 5,350,498 |
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
    e.g. if our dtype is np.complex64, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
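
# Illustrative behaviour (assumes numpy is imported as np, as the function body's checks already do):
print(type(_ensure_dtype_type(1, np.dtype(np.float64))))  # -> <class 'numpy.float64'>
print(type(_ensure_dtype_type("x", np.dtype(object))))    # -> <class 'str'> (object dtype passes through)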
| 5,350,499 |