max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
backend-tests/tests/test_account_suspension.py | drewmoseley/integration | 0 | 11100 | <filename>backend-tests/tests/test_account_suspension.py<gh_stars>0
# Copyright 2020 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import random
import time
from testutils.api.client import ApiClient
import testutils.api.useradm as useradm
import testutils.api.deviceauth as deviceauth
import testutils.api.tenantadm as tenantadm
import testutils.api.deployments as deployments
from testutils.infra.cli import CliTenantadm, CliUseradm
import testutils.util.crypto
from testutils.common import (
User,
Device,
Tenant,
mongo,
clean_mongo,
create_org,
create_random_authset,
get_device_by_id_data,
change_authset_status,
)
@pytest.yield_fixture(scope="function")
def tenants(clean_mongo):
tenants = []
for n in ["tenant1", "tenant2"]:
username = "user@" + n + ".com"
password = "<PASSWORD>"
tenants.append(create_org(n, username, password))
yield tenants
@pytest.fixture(scope="function")
def tenants_users_devices(tenants, mongo):
uc = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth.URL_MGMT)
devauthd = ApiClient(deviceauth.URL_DEVICES)
for t in tenants:
user = t.users[0]
r = uc.call("POST", useradm.URL_LOGIN, auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
for _ in range(2):
aset = create_random_authset(devauthd, devauthm, utoken, t.tenant_token)
dev = Device(aset.did, aset.id_data, aset.pubkey, t.tenant_token)
dev.authsets.append(aset)
t.devices.append(dev)
yield tenants
class TestAccountSuspensionEnterprise:
def test_user_cannot_log_in(self, tenants):
tc = ApiClient(tenantadm.URL_INTERNAL)
uc = ApiClient(useradm.URL_MGMT)
for u in tenants[0].users:
r = uc.call("POST", useradm.URL_LOGIN, auth=(u.name, u.pwd))
assert r.status_code == 200
# tenant's users can log in
for u in tenants[0].users:
r = uc.call("POST", useradm.URL_LOGIN, auth=(u.name, u.pwd))
assert r.status_code == 200
# suspend tenant
r = tc.call(
"PUT",
tenantadm.URL_INTERNAL_SUSPEND,
tenantadm.req_status("suspended"),
path_params={"tid": tenants[0].id},
)
assert r.status_code == 200
time.sleep(10)
# none of tenant's users can log in
for u in tenants[0].users:
r = uc.call("POST", useradm.URL_LOGIN, auth=(u.name, u.pwd))
assert r.status_code == 401
# but other users still can
for u in tenants[1].users:
r = uc.call("POST", useradm.URL_LOGIN, auth=(u.name, u.pwd))
assert r.status_code == 200
def test_authenticated_user_is_rejected(self, tenants):
tc = ApiClient(tenantadm.URL_INTERNAL)
uc = ApiClient(useradm.URL_MGMT)
dc = ApiClient(deviceauth.URL_MGMT)
u = tenants[0].users[0]
# log in
r = uc.call("POST", useradm.URL_LOGIN, auth=(u.name, u.pwd))
assert r.status_code == 200
token = r.text
# check can access an api
r = dc.with_auth(token).call("GET", deviceauth.URL_MGMT_DEVICES)
assert r.status_code == 200
# suspend tenant
r = tc.call(
"PUT",
tenantadm.URL_INTERNAL_SUSPEND,
tenantadm.req_status("suspended"),
path_params={"tid": tenants[0].id},
)
assert r.status_code == 200
time.sleep(10)
# check token is rejected
r = dc.with_auth(token).call("GET", deviceauth.URL_MGMT_DEVICES)
assert r.status_code == 401
def test_accepted_dev_cant_authenticate(self, tenants_users_devices):
dacd = ApiClient(deviceauth.URL_DEVICES)
devauthm = ApiClient(deviceauth.URL_MGMT)
uc = ApiClient(useradm.URL_MGMT)
tc = ApiClient(tenantadm.URL_INTERNAL)
# accept a dev
device = tenants_users_devices[0].devices[0]
user = tenants_users_devices[0].users[0]
r = uc.call("POST", useradm.URL_LOGIN, auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
aset = device.authsets[0]
change_authset_status(devauthm, aset.did, aset.id, "accepted", utoken)
# suspend
r = tc.call(
"PUT",
tenantadm.URL_INTERNAL_SUSPEND,
tenantadm.req_status("suspended"),
path_params={"tid": tenants_users_devices[0].id},
)
assert r.status_code == 200
time.sleep(10)
# try requesting auth
body, sighdr = deviceauth.auth_req(
aset.id_data,
aset.pubkey,
aset.privkey,
tenants_users_devices[0].tenant_token,
)
r = dacd.call("POST", deviceauth.URL_AUTH_REQS, body, headers=sighdr)
assert r.status_code == 401
assert r.json()["error"] == "Account suspended"
def test_authenticated_dev_is_rejected(self, tenants_users_devices):
dacd = ApiClient(deviceauth.URL_DEVICES)
devauthm = ApiClient(deviceauth.URL_MGMT)
uc = ApiClient(useradm.URL_MGMT)
tc = ApiClient(tenantadm.URL_INTERNAL)
dc = ApiClient(deployments.URL_DEVICES)
# accept a dev
user = tenants_users_devices[0].users[0]
r = uc.call("POST", useradm.URL_LOGIN, auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
aset = tenants_users_devices[0].devices[0].authsets[0]
change_authset_status(devauthm, aset.did, aset.id, "accepted", utoken)
# request auth
body, sighdr = deviceauth.auth_req(
aset.id_data,
aset.pubkey,
aset.privkey,
tenants_users_devices[0].tenant_token,
)
r = dacd.call("POST", deviceauth.URL_AUTH_REQS, body, headers=sighdr)
assert r.status_code == 200
dtoken = r.text
# check device can access APIs
r = dc.with_auth(dtoken).call(
"GET",
deployments.URL_NEXT,
qs_params={"device_type": "foo", "artifact_name": "bar"},
)
assert r.status_code == 204
# suspend
r = tc.call(
"PUT",
tenantadm.URL_INTERNAL_SUSPEND,
tenantadm.req_status("suspended"),
path_params={"tid": tenants_users_devices[0].id},
)
assert r.status_code == 200
time.sleep(10)
# check device is rejected
r = dc.with_auth(dtoken).call(
"GET",
deployments.URL_NEXT,
qs_params={"device_type": "foo", "artifact_name": "bar"},
)
assert r.status_code == 401
| 1.796875 | 2 |
src/RepairManager/rules/ecc_reboot_node_rule.py | RichardZhaoW/DLWorkspace | 2 | 11101 | import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import json
import logging
import yaml
import requests
import time
from actions.migrate_job_action import MigrateJobAction
from actions.send_alert_action import SendAlertAction
from actions.reboot_node_action import RebootNodeAction
from actions.uncordon_action import UncordonAction
from datetime import datetime, timedelta, timezone
from rules_abc import Rule
from utils import prometheus_util, k8s_util
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
activity_log = logging.getLogger('activity')
def _extract_node_boot_time_info(response):
node_boot_times = {}
if response is not None and "data" in response:
if "result" in response["data"]:
for m in response["data"]["result"]:
instance = m["metric"]["instance"].split(":")[0]
boot_datetime = datetime.utcfromtimestamp(float(m["value"][1]))
node_boot_times[instance] = boot_datetime
return node_boot_times
def _create_email_for_pause_resume_job(job_id, node_names, job_link, job_owner_email):
message = MIMEMultipart()
message['Subject'] = f'Repair Manager Alert [{job_id} paused/resumed]'
message['To'] = job_owner_email
body = f'''<p>As previously notified, the following node(s) require reboot due to uncorrectable ECC error:</p>
<table border="1">'''
for node in node_names:
body += f'''<tr><td>{node}</td></tr>'''
body += f'''</table><p>
<p> Job <a href="{job_link}">{job_id}</a> has been paused/resumed so node(s) can be repaired.</p>'''
message.attach(MIMEText(body, 'html'))
return message
class EccRebootNodeRule(Rule):
def __init__(self, alert, config):
self.rule = 'ecc_rule'
self.alert = alert
self.config = config
self.ecc_config = self.load_ecc_config()
self.etcd_config = self.load_etcd_config()
self.all_jobs_indexed_by_node = {}
self.nodes_ready_for_action = set()
self.jobs_ready_for_migration = {}
def load_ecc_config(self):
with open('/etc/RepairManager/config/ecc-config.yaml', 'r') as file:
return yaml.safe_load(file)
def load_etcd_config(self):
with open('/etc/RepairManager/config/etcd.conf.yaml', 'r') as file:
return yaml.safe_load(file)
def check_for_rebooted_nodes_and_uncordon(self, dry_run):
# if node has been rebooted since ecc error initially detected,
# uncordon, remove from rule_cache, and mark as resolved
url = f"http://{self.config['prometheus']['ip']}:{self.config['prometheus']['port']}"
query = self.config['prometheus']['node_boot_time_query']
reboot_times_url = prometheus_util.format_url_query(url, query)
uncordon_action = UncordonAction()
try:
response = requests.get(reboot_times_url, timeout=10)
if response:
reboot_data = response.json()
reboot_times = _extract_node_boot_time_info(reboot_data)
bad_nodes = self.alert.get_rule_cache_keys(self.rule)
for node in bad_nodes:
instance = self.alert.get_rule_cache(self.rule, node)["instance"]
time_found_string = self.alert.get_rule_cache(self.rule, node)["time_found"]
time_found_datetime = datetime.strptime(time_found_string, self.config['date_time_format'])
last_reboot_time = reboot_times[instance]
if last_reboot_time > time_found_datetime:
uncordon_action.execute(node, dry_run)
self.alert.remove_from_rule_cache(self.rule, node)
activity_log.info({"action":"marked as resolved from incorrectable ecc error","node":node})
except:
logging.exception(f'Error checking if nodes have rebooted')
def check_for_nodes_with_no_jobs(self):
# if no jobs are running on node, take action on node
bad_nodes = self.alert.get_rule_cache_keys(self.rule)
self.all_jobs_indexed_by_node = k8s_util.get_job_info_indexed_by_node(
nodes=bad_nodes,
portal_url=self.config['portal_url'],
cluster_name=self.config['cluster_name'])
for node in bad_nodes:
node_has_no_jobs = node not in self.all_jobs_indexed_by_node
node_reboot_pending = 'reboot_requested' in self.alert.get_rule_cache(self.rule, node)
if node_has_no_jobs and not node_reboot_pending:
logging.debug(f'node {node} has no running jobs')
self.nodes_ready_for_action.add(node)
def check_if_nodes_are_due_for_reboot(self):
# if configured time has elapsed since initial detection, take action on node
bad_nodes = self.alert.get_rule_cache_keys(self.rule)
for node in bad_nodes:
time_found_string = self.alert.rule_cache[self.rule][node]["time_found"]
time_found_datetime = datetime.strptime(time_found_string, self.config['date_time_format'])
delta = timedelta(days=self.ecc_config.get("days_until_node_reboot", 5))
now = datetime.utcnow()
node_reboot_pending = 'reboot_requested' in self.alert.get_rule_cache(self.rule, node)
if now - time_found_datetime > delta and not node_reboot_pending:
logging.debug(f'Configured time has passed for node {node}')
self.nodes_ready_for_action.add(node)
self.determine_jobs_to_be_migrated(node)
def determine_jobs_to_be_migrated(self, node):
if node in self.all_jobs_indexed_by_node:
jobs_on_node = self.all_jobs_indexed_by_node[node]
for job in jobs_on_node:
job_id = job["job_id"]
if job_id not in self.jobs_ready_for_migration:
self.jobs_ready_for_migration[job_id] = {
"user_name": job["user_name"],
"vc_name": job["vc_name"],
"node_names": [node],
"job_link": job["job_link"]
}
else:
self.jobs_ready_for_migration[job_id]["node_names"].append(node)
def migrate_jobs_and_alert_job_owners(self, dry_run):
alert_action = SendAlertAction(self.alert)
max_attempts = self.ecc_config.get("attempts_for_pause_resume_jobs", 5)
wait_time = self.ecc_config.get("time_sleep_after_pausing", 30)
for job_id in self.jobs_ready_for_migration:
job = self.jobs_ready_for_migration[job_id]
job_owner = job['user_name']
job_owner_email = f"{job_owner}@{self.config['job_owner_email_domain']}"
node_names = job["node_names"]
job_link = job['job_link']
rest_url = self.config["rest_url"]
# migrate all jobs
migrate_job = MigrateJobAction(rest_url, max_attempts)
success = migrate_job.execute(
job_id=job_id,
job_owner_email=job_owner_email,
wait_time=wait_time,
dry_run=dry_run)
# alert job owners
if success:
message = _create_email_for_pause_resume_job(job_id, node_names, job_link, job_owner_email)
alert_dry_run = dry_run or not self.ecc_config['enable_alert_job_owners']
alert_action.execute(
message=message,
dry_run=alert_dry_run,
additional_log={"job_id":job_id,"job_owner":job_owner})
else:
logging.warning(f"Could not pause/resume the following job: {job_id}")
# skip rebooting the node this iteration
# and try again later
for node in node_names:
self.nodes_ready_for_action.remove(node)
def reboot_bad_nodes(self, dry_run):
reboot_action = RebootNodeAction()
for node in self.nodes_ready_for_action:
success = reboot_action.execute(node, self.etcd_config, dry_run)
if success:
# update reboot status so action is not taken again
cache_value = self.alert.get_rule_cache(self.rule, node)
cache_value['reboot_requested'] = datetime.utcnow().strftime(self.config['date_time_format'])
self.alert.update_rule_cache(self.rule, node, cache_value)
def check_status(self):
dry_run = not self.ecc_config["enable_reboot"]
self.check_for_rebooted_nodes_and_uncordon(dry_run)
self.check_for_nodes_with_no_jobs()
self.check_if_nodes_are_due_for_reboot()
return len(self.nodes_ready_for_action) > 0
def take_action(self):
dry_run = not self.ecc_config["enable_reboot"]
self.migrate_jobs_and_alert_job_owners(dry_run)
self.reboot_bad_nodes(dry_run)
| 1.875 | 2 |
work/dib-ipa-element/virtmedia-netconf/ironic-bmc-hardware-manager/src/ironic_bmc_hardware_manager/bmc.py | alexandruavadanii/ipa-deployer | 0 | 11102 | <gh_stars>0
# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from ironic_python_agent import hardware
from ironic_python_agent import utils
from oslo_log import log
from oslo_concurrency import processutils
LOG = log.getLogger()
class BMCHardwareManager(hardware.GenericHardwareManager):
HARDWARE_MANAGER_NAME = 'BMCHardwareManager'
HARDWARE_MANAGER_VERSION = '1'
def evaluate_hardware_support(self):
"""Declare level of hardware support provided."""
LOG.info('Running in BMC environment')
return hardware.HardwareSupport.SERVICE_PROVIDER
def list_network_interfaces(self):
network_interfaces_list = []
bmc_mac = self.get_ipmi_info().get('MAC Address', False)
if bmc_mac:
LOG.info("Adding MAC address net interfaces %s", bmc_mac)
bmc_address = self.get_bmc_address()
network_interfaces_list.append(hardware.NetworkInterface(
name="BMC_INTERFACE",
mac_addr=bmc_mac,
ipv4_address=bmc_address,
has_carrier=True,
vendor="BMC",
product="Akraino"))
else:
network_interfaces_list = super(BMCHardwareManager, self).list_network_interfaces()
return network_interfaces_list
def get_ipmi_info(self):
# These modules are rarely loaded automatically
utils.try_execute('modprobe', 'ipmi_msghandler')
utils.try_execute('modprobe', 'ipmi_devintf')
utils.try_execute('modprobe', 'ipmi_si')
try:
out, _e = utils.execute(
"ipmitool lan print", shell=True, attempts=2)
except (processutils.ProcessExecutionError, OSError) as e:
# Not error, because it's normal in virtual environment
LOG.warning("Cannot get BMC info: %s", e)
return {}
info = {}
for line in out.split('\n'):
spl = line.find(':')
if spl == -1:
continue
else:
key = line[0:spl].strip()
if key == '':
continue
info[line[0:spl].strip()] = line[spl+1:].strip()
return info
| 1.875 | 2 |
museum_site/context_processors.py | DrDos0016/z2 | 3 | 11103 | <reponame>DrDos0016/z2
from django.core.cache import cache
from datetime import datetime
from museum_site.models.detail import Detail
from museum_site.models.file import File
from museum_site.constants import TERMS_DATE
from museum_site.common import (
DEBUG, EMAIL_ADDRESS, BOOT_TS, CSS_INCLUDES, UPLOAD_CAP, env_from_host,
qs_sans
)
from museum_site.core.detail_identifiers import *
def museum_global(request):
data = {}
# Debug mode
if DEBUG or request.GET.get("DEBUG") or request.session.get("DEBUG"):
data["debug"] = True
else:
data["debug"] = False
# Server info
data["HOST"] = request.get_host()
data["ENV"] = env_from_host(data["HOST"])
data["PROTOCOL"] = "https" if request.is_secure() else "http"
data["DOMAIN"] = data["PROTOCOL"] + "://" + data["HOST"]
# Server date/time
data["datetime"] = datetime.utcnow()
if data["datetime"].day == 27: # Drupe Day
data["drupe"] = True
if data["datetime"].day == 1 and data["datetime"].month == 4: # April 1st
data["april"] = True
# Common query string modifications
data["qs_sans_page"] = qs_sans(request.GET, "page")
data["qs_sans_view"] = qs_sans(request.GET, "view")
data["qs_sans_both"] = qs_sans(request.GET, ["page", "view"])
# E-mail
data["EMAIL_ADDRESS"] = EMAIL_ADDRESS
data["BOOT_TS"] = BOOT_TS
# CSS Files
data["CSS_INCLUDES"] = CSS_INCLUDES
# Featured Worlds
data["fg"] = File.objects.featured_worlds().order_by("?").first()
if request.GET.get("fgid"):
data["fg"] = File.objects.reach(pk=int(request.GET["fgid"]))
if data["fg"]:
data["fg"].extra_context = {"nozoom": True}
data["fg"] = data["fg"]
# Upload Cap
data["UPLOAD_CAP"] = UPLOAD_CAP
# Queue size
data["UPLOAD_QUEUE_SIZE"] = cache.get("UPLOAD_QUEUE_SIZE", "-")
# User TOS Date checks
if request.user.is_authenticated:
if (
TERMS_DATE > request.user.profile.accepted_tos and
request.method == "GET" and
request.path != "/user/update-tos/"
):
# Force a new login
for key in [
"_auth_user_id", "_auth_user_backend", "_auth_user_hash"
]:
if request.session.get(key):
del request.session[key]
return data
| 2.078125 | 2 |
misago/users/views/avatarserver.py | HenryChenV/iJiangNan | 1 | 11104 | <reponame>HenryChenV/iJiangNan
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.shortcuts import redirect
from misago.conf import settings
UserModel = get_user_model()
def user_avatar(request, pk, size):
size = int(size)
try:
user = UserModel.objects.get(pk=pk)
except UserModel.DoesNotExist:
return blank_avatar(request)
found_avatar = user.avatars[0]
for avatar in user.avatars:
if avatar['size'] >= size:
found_avatar = avatar
return redirect(found_avatar['url'])
def blank_avatar(request):
return redirect(static(settings.MISAGO_BLANK_AVATAR))
| 2.25 | 2 |
csbdeep/internals/nets.py | papkov/CSBDeep | 2 | 11105 | from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import range, zip, map, reduce, filter
from keras.layers import Input, Conv2D, Conv3D, Activation, Lambda
from keras.models import Model
from keras.layers.merge import Add, Concatenate
import tensorflow as tf
from keras import backend as K
from .blocks import unet_block, unet_blocks, gaussian_2d
import re
from ..utils import _raise, backend_channels_last
import numpy as np
def custom_unet(input_shape,
last_activation,
n_depth=2,
n_filter_base=16,
kernel_size=(3,3,3),
n_conv_per_depth=2,
activation="relu",
batch_norm=False,
dropout=0.0,
pool_size=(2,2,2),
n_channel_out=1,
residual=False,
prob_out=False,
long_skip=True,
eps_scale=1e-3):
""" TODO """
if last_activation is None:
raise ValueError("last activation has to be given (e.g. 'sigmoid', 'relu')!")
all((s % 2 == 1 for s in kernel_size)) or _raise(ValueError('kernel size should be odd in all dimensions.'))
channel_axis = -1 if backend_channels_last() else 1
n_dim = len(kernel_size)
# TODO: rewrite with conv_block
conv = Conv2D if n_dim == 2 else Conv3D
input = Input(input_shape, name="input")
unet = unet_block(n_depth, n_filter_base, kernel_size, input_planes=input_shape[-1],
activation=activation, dropout=dropout, batch_norm=batch_norm,
n_conv_per_depth=n_conv_per_depth, pool=pool_size, long_skip=long_skip)(input)
final = conv(n_channel_out, (1,)*n_dim, activation='linear')(unet)
if residual:
if not (n_channel_out == input_shape[-1] if backend_channels_last() else n_channel_out == input_shape[0]):
raise ValueError("number of input and output channels must be the same for a residual net.")
final = Add()([final, input])
final = Activation(activation=last_activation)(final)
if prob_out:
scale = conv(n_channel_out, (1,)*n_dim, activation='softplus')(unet)
scale = Lambda(lambda x: x+np.float32(eps_scale))(scale)
final = Concatenate(axis=channel_axis)([final, scale])
return Model(inputs=input, outputs=final)
def uxnet(input_shape,
n_depth=2,
n_filter_base=16,
kernel_size=(3, 3),
n_conv_per_depth=2,
activation="relu",
last_activation='linear',
batch_norm=False,
dropout=0.0,
pool_size=(2, 2),
residual=True,
odd_to_even=False,
shortcut=None,
shared_idx=[],
prob_out=False,
eps_scale=1e-3):
"""
Multi-body U-Net which learns identity by leaving one plane out in each branch
:param input_shape:
:param n_depth:
:param n_filter_base:
:param kernel_size:
:param n_conv_per_depth:
:param activation:
:param last_activation:
:param batch_norm:
:param dropout:
:param pool_size:
:param prob_out:
:param eps_scale:
:return: Model
"""
# TODO: fill params
# TODO: add odd-to-even mode
# Define vars
channel_axis = -1 if backend_channels_last() else 1
n_planes = input_shape[channel_axis]
if n_planes % 2 != 0 and odd_to_even:
raise ValueError('Odd-to-even mode does not support uneven number of planes')
n_dim = len(kernel_size)
conv = Conv2D if n_dim == 2 else Conv3D
# Define functional model
input = Input(shape=input_shape, name='input_main')
# TODO test new implementation and remove old
# Split planes (preserve channel)
input_x = [Lambda(lambda x: x[..., i:i+1], output_shape=(None, None, 1))(input) for i in range(n_planes)]
# We can train either in odd-to-even mode or in LOO mode
if odd_to_even:
# In this mode we stack together odd and even planes, train the net to predict even from odd and vice versa
# input_x_out = [Concatenate(axis=-1)(input_x[j::2]) for j in range(2)]
input_x_out = [Concatenate(axis=-1)(input_x[j::2]) for j in range(1, -1, -1)]
else:
# Concatenate planes back in leave-one-out way
input_x_out = [Concatenate(axis=-1)([plane for i, plane in enumerate(input_x) if i != j]) for j in range(n_planes)]
# if odd_to_even:
# input_x_out = [Lambda(lambda x: x[..., j::2],
# output_shape=(None, None, n_planes // 2),
# name='{}_planes'.format('even' if j == 0 else 'odd'))(input)
# for j in range(1, -1, -1)]
# else:
# # input_x_out = [Lambda(lambda x: x[..., tf.convert_to_tensor([i for i in range(n_planes) if i != j], dtype=tf.int32)],
# # output_shape=(None, None, n_planes-1),
# # name='leave_{}_plane_out'.format(j))(input)
# # for j in range(n_planes)]
#
# input_x_out = [Lambda(lambda x: K.concatenate([x[..., :j], x[..., (j+1):]], axis=-1),
# output_shape=(None, None, n_planes - 1),
# name='leave_{}_plane_out'.format(j))(input)
# for j in range(n_planes)]
# U-Net parameters depend on mode (odd-to-even or LOO)
n_blocks = 2 if odd_to_even else n_planes
input_planes = n_planes // 2 if odd_to_even else n_planes-1
output_planes = n_planes // 2 if odd_to_even else 1
# Create U-Net blocks (by number of planes)
unet_x = unet_blocks(n_blocks=n_blocks, input_planes=input_planes, output_planes=output_planes,
n_depth=n_depth, n_filter_base=n_filter_base, kernel_size=kernel_size,
activation=activation, dropout=dropout, batch_norm=batch_norm,
n_conv_per_depth=n_conv_per_depth, pool=pool_size, shared_idx=shared_idx)
unet_x = [unet(inp_out) for unet, inp_out in zip(unet_x, input_x_out)]
# Version without weight sharing:
# unet_x = [unet_block(n_depth, n_filter_base, kernel_size,
# activation=activation, dropout=dropout, batch_norm=batch_norm,
# n_conv_per_depth=n_conv_per_depth, pool=pool_size,
# prefix='out_{}_'.format(i))(inp_out) for i, inp_out in enumerate(input_x_out)]
# TODO: rewritten for sharing -- remove commented below
# Convolve n_filter_base to 1 as each U-Net predicts a single plane
# unet_x = [conv(1, (1,) * n_dim, activation=activation)(unet) for unet in unet_x]
if residual:
if odd_to_even:
# For residual U-Net sum up output for odd planes with even planes and vice versa
unet_x = [Add()([unet, inp]) for unet, inp in zip(unet_x, input_x[::-1])]
else:
# For residual U-Net sum up output with its neighbor (next for the first plane, previous for the rest)
unet_x = [Add()([unet, inp]) for unet, inp in zip(unet_x, [input_x[1]]+input_x[:-1])]
# Concatenate outputs of blocks, should receive (None, None, None, n_planes)
# TODO assert to check shape?
if odd_to_even:
# Split even and odd, assemble them together in the correct order
# TODO tests
unet_even = [Lambda(lambda x: x[..., i:i+1],
output_shape=(None, None, 1),
name='even_{}'.format(i))(unet_x[0]) for i in range(n_planes // 2)]
unet_odd = [Lambda(lambda x: x[..., i:i+1],
output_shape=(None, None, 1),
name='odd_{}'.format(i))(unet_x[1]) for i in range(n_planes // 2)]
unet_x = list(np.array(list(zip(unet_even, unet_odd))).flatten())
unet = Concatenate(axis=-1)(unet_x)
if shortcut is not None:
# We can create a shortcut without long skip connection to prevent noise memorization
if shortcut == 'unet':
shortcut_block = unet_block(long_skip=False, input_planes=n_planes,
n_depth=n_depth, n_filter_base=n_filter_base, kernel_size=kernel_size,
activation=activation, dropout=dropout, batch_norm=batch_norm,
n_conv_per_depth=n_conv_per_depth, pool=pool_size)(input)
shortcut_block = conv(n_planes, (1,) * n_dim, activation='linear', name='shortcut_final_conv')(shortcut_block)
# Or a simple gaussian blur block
elif shortcut == 'gaussian':
shortcut_block = gaussian_2d(n_planes, k=13, s=7)(input)
else:
raise ValueError('Shortcut should be either unet or gaussian')
# TODO add or concatenate?
unet = Add()([unet, shortcut_block])
# unet = Concatenate(axis=-1)([unet, shortcut_unet])
# Final activation layer
final = Activation(activation=last_activation)(unet)
if prob_out:
scale = conv(n_planes, (1,)*n_dim, activation='softplus')(unet)
scale = Lambda(lambda x: x+np.float32(eps_scale))(scale)
final = Concatenate(axis=channel_axis)([final, scale])
return Model(inputs=input, outputs=final)
def common_unet(n_dim=2, n_depth=1, kern_size=3, n_first=16, n_channel_out=1,
residual=True, prob_out=False, long_skip=True, last_activation='linear'):
"""
Construct a common CARE neural net based on U-Net [1]_ and residual learning [2]_
to be used for image restoration/enhancement.
Parameters
----------
n_dim : int
number of image dimensions (2 or 3)
n_depth : int
number of resolution levels of U-Net architecture
kern_size : int
size of convolution filter in all image dimensions
n_first : int
number of convolution filters for first U-Net resolution level (value is doubled after each downsampling operation)
n_channel_out : int
number of channels of the predicted output image
residual : bool
if True, model will internally predict the residual w.r.t. the input (typically better)
requires number of input and output image channels to be equal
prob_out : bool
standard regression (False) or probabilistic prediction (True)
if True, model will predict two values for each input pixel (mean and positive scale value)
last_activation : str
name of activation function for the final output layer
Returns
-------
function
Function to construct the network, which takes as argument the shape of the input image
Example
-------
>>> model = common_unet(2, 1,3,16, 1, True, False)(input_shape)
References
----------
.. [1] <NAME>, <NAME>, <NAME>x, *U-Net: Convolutional Networks for Biomedical Image Segmentation*, MICCAI 2015
.. [2] <NAME>, <NAME>, <NAME>, <NAME>. *Deep Residual Learning for Image Recognition*, CVPR 2016
"""
def _build_this(input_shape):
return custom_unet(input_shape, last_activation, n_depth, n_first, (kern_size,)*n_dim, pool_size=(2,)*n_dim,
n_channel_out=n_channel_out, residual=residual, prob_out=prob_out, long_skip=long_skip)
return _build_this
def common_uxnet(n_dim=2, n_depth=1, kern_size=3, n_first=16,
residual=True, prob_out=False, last_activation='linear',
shared_idx=[], odd_to_even=False, shortcut=None):
def _build_this(input_shape):
return uxnet(input_shape=input_shape, last_activation=last_activation, n_depth=n_depth, n_filter_base=n_first,
kernel_size=(kern_size,)*n_dim, pool_size=(2,)*n_dim,
residual=residual, prob_out=prob_out,
shared_idx=shared_idx, odd_to_even=odd_to_even, shortcut=shortcut)
return _build_this
modelname = re.compile("^(?P<model>resunet|unet)(?P<n_dim>\d)(?P<prob_out>p)?_(?P<n_depth>\d+)_(?P<kern_size>\d+)_(?P<n_first>\d+)(_(?P<n_channel_out>\d+)out)?(_(?P<last_activation>.+)-last)?$")
def common_unet_by_name(model):
r"""Shorthand notation for equivalent use of :func:`common_unet`.
Parameters
----------
model : str
define model to be created via string, which is parsed as a regular expression:
`^(?P<model>resunet|unet)(?P<n_dim>\d)(?P<prob_out>p)?_(?P<n_depth>\d+)_(?P<kern_size>\d+)_(?P<n_first>\d+)(_(?P<n_channel_out>\d+)out)?(_(?P<last_activation>.+)-last)?$`
Returns
-------
function
Calls :func:`common_unet` with the respective parameters.
Raises
------
ValueError
If argument `model` is not a valid string according to the regular expression.
Example
-------
>>> model = common_unet_by_name('resunet2_1_3_16_1out')(input_shape)
>>> # equivalent to: model = common_unet(2, 1,3,16, 1, True, False)(input_shape)
Todo
----
Backslashes in docstring for regexp not rendered correctly.
"""
m = modelname.fullmatch(model)
if m is None:
raise ValueError("model name '%s' unknown, must follow pattern '%s'" % (model, modelname.pattern))
# from pprint import pprint
# pprint(m.groupdict())
options = {k:int(m.group(k)) for k in ['n_depth','n_first','kern_size']}
options['prob_out'] = m.group('prob_out') is not None
options['residual'] = {'unet': False, 'resunet': True}[m.group('model')]
options['n_dim'] = int(m.group('n_dim'))
options['n_channel_out'] = 1 if m.group('n_channel_out') is None else int(m.group('n_channel_out'))
if m.group('last_activation') is not None:
options['last_activation'] = m.group('last_activation')
return common_unet(**options)
def receptive_field_unet(n_depth, kern_size, pool_size=2, n_dim=2, img_size=1024):
"""Receptive field for U-Net model (pre/post for each dimension)."""
x = np.zeros((1,)+(img_size,)*n_dim+(1,))
mid = tuple([s//2 for s in x.shape[1:-1]])
x[(slice(None),) + mid + (slice(None),)] = 1
model = custom_unet (
x.shape[1:],
n_depth=n_depth, kernel_size=[kern_size]*n_dim, pool_size=[pool_size]*n_dim,
n_filter_base=8, activation='linear', last_activation='linear',
)
y = model.predict(x)[0,...,0]
y0 = model.predict(0*x)[0,...,0]
ind = np.where(np.abs(y-y0)>0)
return [(m-np.min(i), np.max(i)-m) for (m, i) in zip(mid, ind)]
| 2.171875 | 2 |
src/inf/runtime_data.py | feagi/feagi-core | 11 | 11106 | <filename>src/inf/runtime_data.py
parameters = {}
genome = {}
genome_stats = {}
genome_test_stats = []
brain = {}
cortical_list = []
cortical_map = {}
intercortical_mapping = []
block_dic = {}
upstream_neurons = {}
memory_list = {}
activity_stats = {}
temp_neuron_list = []
original_genome_id = []
fire_list = []
termination_flag = False
variation_counter_actual = 0
exposure_counter_actual = 0
mnist_training = {}
mnist_testing = {}
top_10_utf_memory_neurons = {}
top_10_utf_neurons = {}
v1_members = []
prunning_candidates = set()
genome_id = ""
event_id = '_'
blueprint = ""
comprehension_queue = ''
working_directory = ''
connectome_path = ''
paths = {}
watchdog_queue = ''
exit_condition = False
fcl_queue = ''
proximity_queue = ''
last_ipu_activity = ''
last_alertness_trigger = ''
influxdb = ''
mongodb = ''
running_in_container = False
hardware = ''
gazebo = False
stimulation_data = {}
hw_controller_path = ''
hw_controller = None
opu_pub = None
router_address = None
burst_timer = 1
# rules = ""
brain_is_running = False
# live_mode_status can have modes of idle, learning, testing, tbd
live_mode_status = 'idle'
fcl_history = {}
brain_run_id = ""
burst_detection_list = {}
burst_count = 0
fire_candidate_list = {}
previous_fcl = {}
future_fcl = {}
labeled_image = []
training_neuron_list_utf = {}
training_neuron_list_img = {}
empty_fcl_counter = 0
neuron_mp_list = []
pain_flag = False
cumulative_neighbor_count = 0
time_neuron_update = ''
time_apply_plasticity_ext = ''
plasticity_time_total = None
plasticity_time_total_p1 = None
plasticity_dict = {}
tester_test_stats = {}
# Flags
flag_ready_to_inject_image = False
| 1.765625 | 2 |
scripts/topo_countries.py | taufikhe/Censof-Mini-Project | 0 | 11107 | <reponame>taufikhe/Censof-Mini-Project
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
from geonamescache import GeonamesCache
gc = GeonamesCache()
toposrc = '../data/states-provinces.json'
for iso2, country in gc.get_countries().items():
iso3 = country['iso3']
topojson = 'mapshaper -i {0} -filter \'"{1}" == adm0_a3\' -filter-fields fips,name -o format=topojson {1}.json'
subprocess.call(topojson.format(toposrc, iso3), shell=True)
subprocess.call('mv *.json ../src/topojson/countries/', shell=True)
| 2.234375 | 2 |
nordb/database/nordic2sql.py | MrCubanfrog/NorDB | 1 | 11108 | <reponame>MrCubanfrog/NorDB
"""
This module contains all information for pushing a NordicEvent object into the database.
Functions and Classes
---------------------
"""
import psycopg2
import os
import re
import datetime
from nordb.core import usernameUtilities
from nordb.database import creationInfo
INSERT_COMMANDS = {
1: (
"INSERT INTO "
"nordic_header_main "
"(origin_time, origin_date, location_model, "
"distance_indicator, event_desc_id, epicenter_latitude, "
"epicenter_longitude, depth, depth_control, "
"locating_indicator, epicenter_reporting_agency, "
"stations_used, rms_time_residuals, magnitude_1, "
"type_of_magnitude_1, magnitude_reporting_agency_1, "
"magnitude_2, type_of_magnitude_2, magnitude_reporting_agency_2, "
"magnitude_3, type_of_magnitude_3, magnitude_reporting_agency_3, "
"event_id) "
"VALUES "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
"RETURNING "
"id;"
),
2: (
"INSERT INTO "
"nordic_header_macroseismic "
"(description, diastrophism_code, tsunami_code, seiche_code, "
"cultural_effects, unusual_effects, maximum_observed_intensity, "
"maximum_intensity_qualifier, intensity_scale, macroseismic_latitude, "
"macroseismic_longitude, macroseismic_magnitude, type_of_magnitude, "
"logarithm_of_radius, logarithm_of_area_1, bordering_intensity_1, "
"logarithm_of_area_2, bordering_intensity_2, quality_rank, "
"reporting_agency, event_id) "
"VALUES "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
" %s, %s, %s, %s, %s, %s) "
"RETURNING "
"id"
),
3: (
"INSERT INTO "
"nordic_header_comment "
"(h_comment, event_id) "
"VALUES "
"(%s, %s) "
"RETURNING "
"id "
),
5: (
"INSERT INTO "
"nordic_header_error "
"(gap, second_error, epicenter_latitude_error, "
"epicenter_longitude_error, depth_error, "
"magnitude_error, header_id) "
"VALUES "
"(%s, %s, %s, %s, %s, %s, %s)"
"RETURNING "
"id"
),
6: (
"INSERT INTO "
"nordic_header_waveform "
"(waveform_info, event_id) "
"VALUES "
"(%s, %s) "
"RETURNING "
"id "
),
7: (
"INSERT INTO "
"nordic_phase_data "
"(station_code, sp_instrument_type, sp_component, quality_indicator, "
"phase_type, weight, first_motion, observation_time, "
"signal_duration, max_amplitude, max_amplitude_period, back_azimuth, "
"apparent_velocity, signal_to_noise, azimuth_residual, "
"travel_time_residual, location_weight, epicenter_distance, "
"epicenter_to_station_azimuth, event_id) "
"VALUES "
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, "
"%s, %s, %s, %s, %s, %s, %s, %s, %s) "
"RETURNING "
"id "
),
}
def event2Database(nordic_event, solution_type = "O", nordic_filename = None, f_creation_id = None, e_id = -1, privacy_level='public', db_conn = None):
"""
Function that pushes a NordicEvent object to the database
:param NordicEvent nordic_event: Event that will be pushed to the database
:param int solution_type: event type id
:param str nordic_filename: name of the file from which the nordic is read from
:param int f_creation_id: id of the creation_info entry in the database
:param int e_id: id of the event to which this event will be attached by event_root. If -1 then this event will not be attached to anything.
:param string privacy_level: privacy level of the event in the database
"""
if db_conn is None:
conn = usernameUtilities.log2nordb()
else:
conn = db_conn
if f_creation_id is None:
creation_id = creationInfo.createCreationInfo(privacy_level, conn)
else:
creation_id = f_creation_id
author_id = None
for header in nordic_event.comment_h:
search = re.search(r'\((\w{3})\)', header.h_comment)
if search is not None:
author_id = search.group(0)[1:-1]
if author_id is None:
author_id = '---'
cur = conn.cursor()
try:
cur.execute("SELECT allow_multiple FROM solution_type WHERE type_id = %s", (solution_type,))
ans = cur.fetchone()
if ans is None:
raise Exception("{0} is not a valid solution_type! Either add the event type to the database or use another solution_type".format(solution_type))
allow_multiple = ans[0]
filename_id = -1
cur.execute("SELECT id FROM nordic_file WHERE file_location = %s", (nordic_filename,))
filenameids = cur.fetchone()
if filenameids is not None:
filename_id = filenameids[0]
root_id = -1
if nordic_event.root_id != -1:
root_id = nordic_event.root_id
if e_id >= 0:
cur.execute("SELECT root_id, solution_type FROM nordic_event WHERE id = %s", (e_id,))
try:
root_id, old_solution_type = cur.fetchone()
except:
raise Exception("Given linking even_id does not exist in the database!")
if e_id == -1 and nordic_event.root_id == -1:
cur.execute("INSERT INTO nordic_event_root DEFAULT VALUES RETURNING id;")
root_id = cur.fetchone()[0]
if filename_id == -1:
cur.execute("INSERT INTO nordic_file (file_location) VALUES (%s) RETURNING id", (nordic_filename,))
filename_id = cur.fetchone()[0]
cur.execute("INSERT INTO " +
"nordic_event " +
"(solution_type, root_id, nordic_file_id, author_id, creation_id) " +
"VALUES " +
"(%s, %s, %s, %s, %s) " +
"RETURNING " +
"id",
(solution_type,
root_id,
filename_id,
author_id,
creation_id)
)
event_id = cur.fetchone()[0]
nordic_event.event_id = event_id
if e_id != -1 and solution_type == old_solution_type and not allow_multiple:
cur.execute("UPDATE nordic_event SET solution_type = 'O' WHERE id = %s", (e_id,))
main_header_id = -1
for main in nordic_event.main_h:
main.event_id = event_id
main.h_id = executeCommand( cur,
INSERT_COMMANDS[1],
main.getAsList(),
True)[0][0]
if main.error_h is not None:
main.error_h.header_id = main.h_id
main.error_h.h_id = executeCommand( cur,
INSERT_COMMANDS[5],
main.error_h.getAsList(),
True)[0][0]
for macro in nordic_event.macro_h:
macro.event_id = event_id
macro.h_id = executeCommand(cur,
INSERT_COMMANDS[2],
macro.getAsList(),
True)[0][0]
for comment in nordic_event.comment_h:
comment.event_id = event_id
comment.h_id = executeCommand( cur,
INSERT_COMMANDS[3],
comment.getAsList(),
True)[0][0]
for waveform in nordic_event.waveform_h:
waveform.event_id = event_id
waveform.h_id = executeCommand( cur,
INSERT_COMMANDS[6],
waveform.getAsList(),
True)[0][0]
for phase_data in nordic_event.data:
phase_data.event_id = event_id
d_id = executeCommand( cur,
INSERT_COMMANDS[7],
phase_data.getAsList(),
True)[0][0]
phase_data.d_id = d_id
conn.commit()
except Exception as e:
raise e
finally:
if f_creation_id is None:
creationInfo.deleteCreationInfoIfUnnecessary(creation_id, db_conn=conn)
if db_conn is None:
conn.close()
def executeCommand(cur, command, vals, returnValue):
"""
Function for executing a command with values and handling exceptions
:param Psycopg.Cursor cur: cursor object from psycopg2 library
:param str command: the sql command string
:param list vals: list of values for the command
:param bool returnValue: boolean values for if the command returns a value
:returns: Values returned by the query or None if returnValue is False
"""
cur.execute(command, vals)
if returnValue:
return cur.fetchall()
else:
return None
| 2.15625 | 2 |
movie_trailer_website/media.py | mradenovic/movie-trailer-website | 0 | 11109 | """This module contains class definitions for storing media files"""
import webbrowser
class Movie():
"""Movie class defines movies.
Attributes:
movie_title (str): Title of the movie
movie_storyline (str): Short description of the movie
poster_image (str): URL of the poster image
trailer_youtube (str): URL of the Youtube trailer
"""
def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
# type: (object, object, object, object) -> object
"""
Arguments:
movie_title (str): Title of the movie
movie_storyline (str): Short description of the movie
poster_image (str): URL of the poster image
trailer_youtube (str): URL of the Youtube trailer
"""
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
| 3.703125 | 4 |
201-vmss-bottle-autoscale/workserver.py | kollexy/azure-quickstart-templates | 10 | 11110 | <gh_stars>1-10
# workserver.py - simple HTTP server with a do_work / stop_work API
# GET /do_work activates a worker thread which uses CPU
# GET /stop_work signals worker thread to stop
import math
import socket
import threading
import time
from bottle import route, run
hostname = socket.gethostname()
hostport = 9000
keepworking = False # boolean to switch worker thread on or off
# thread which maximizes CPU usage while the keepWorking global is True
def workerthread():
# outer loop to run while waiting
while (True):
# main loop to thrash the CPU
while (keepworking == True):
for x in range(1, 69):
math.factorial(x)
time.sleep(3)
# start the worker thread
worker_thread = threading.Thread(target=workerthread, args=())
worker_thread.start()
def writebody():
body = '<html><head><title>Work interface - build</title></head>'
body += '<body><h2>Worker interface on ' + hostname + '</h2><ul><h3>'
if keepworking == False:
body += '<br/>Worker thread is not running. <a href="./do_work">Start work</a><br/>'
else:
body += '<br/>Worker thread is running. <a href="./stop_work">Stop work</a><br/>'
body += '<br/>Usage:<br/><br/>/do_work = start worker thread<br/>/stop_work = stop worker thread<br/>'
body += '</h3></ul></body></html>'
return body
@route('/')
def root():
return writebody()
@route('/do_work')
def do_work():
global keepworking
# start worker thread
keepworking = True
return writebody()
@route('/stop_work')
def stop_work():
global keepworking
# stop worker thread
keepworking = False
return writebody()
run(host=hostname, port=hostport)
| 3.328125 | 3 |
year_3/databases_sem1/lab1/cli.py | honchardev/KPI | 0 | 11111 | from maxdb import DB
def runtime_on_any_exception(func):
def decorate(*args, **kwargs):
try:
func(*args, **kwargs)
except:
raise RuntimeError
return decorate
class CLIUtils(object):
DEFAULT_PATH = 'storage.json'
def __init__(self):
self._db = None
self._path = self.DEFAULT_PATH
def run(self, rawcmd):
cmd, *args = rawcmd.split(' ')
if cmd:
try:
self._cmds_cache[cmd](args)
except KeyError:
print('Lab1 does not have command <{0}>'.format(cmd))
except RuntimeError:
print('Incorrect arguments for DB.{0}: <{1}>'.format(cmd, args))
@property
def _cmds_cache(self):
return {
'tables': self._tables,
'all': self._all,
'insert': self._insert,
'get': self._get,
'update': self._update,
'delete': self._delete,
'help': lambda _: print(self._help_msg),
'path': lambda _: print(self._path),
'exit': self._close,
}
@property
def _help_msg(self):
return """LAB1 HELP:
| tables
| print list of tables from current storage.
| all <table> (<table> ...)
| display _all values from specific table.
| all labcondition
| display _all products with price more than 100UAH.
| insert <table> <cnt>
| insert N items to the table.
| is followed by >>>column_name <value>
| get <table> <id>
| get single row specified by id from table.
| update <table> <id>
| update table with a new single value.
| is followed by
| >>>with <column> <value> (<column> <value> (...))
| delete <table> <id>
| delete row specified by id from table.
| save <filepath>
| save database using current storage type to specified filepath.
| load <filepath>
| load specific database from file using current storage type.
| help
| display current message.
| path
| display storage file path.
| exit
| exit the program.
"""
def _tables(self, _):
print(self._db.tables())
@runtime_on_any_exception
def _all(self, args):
if 'labcondition' == args[0]:
found_rows = self._db.get(
'Products',
column='price',
cond=lambda p: int(p.value) > 100
)
print('Rows from DB.Products with price>100:')
print('\n'.join(map(str, found_rows)))
else:
for table_name in args:
table_rows = self._db.table(table_name).all_ids()
table_pretty_rows = '\n'.join(map(lambda i: 'ID {0} {1}'.format(*i), table_rows))
print('DB.{0}:\n{1}'.format(table_name, table_pretty_rows))
@runtime_on_any_exception
def _insert(self, args):
table_name, cnt = args
table_to_insert = self._db.table(table_name)
for cur_cnt in range(int(cnt)):
print('Please, enter values for DB.{0} row:'.format(table_name))
row_to_insert = {}
for column_name, column_type in table_to_insert.columns.items():
if column_type == 'fk':
print('Enter Table for FK: fktable=', end='')
fktable = input()
print('Enter Id for FK: fkid=', end='')
fkid = input()
row_to_insert[column_name] = (
{'table': fktable, 'fkid': fkid},
column_type
)
else:
print('Enter {0}, type={1}: {0}='.format(column_name, column_type), end='')
column_value = input()
row_to_insert[column_name] = (column_value, column_type)
table_to_insert.insert(row_to_insert)
@runtime_on_any_exception
def _get(self, args):
table_name, row_idx = args
print('DB.{0} id={1}:'.format(*args))
print(self._db.get(table_name, doc_id=int(row_idx)) or 'Not Found DB.{0}.{1}'.format(*args))
@runtime_on_any_exception
def _update(self, args):
table_name, row_idx = args
table_to_update = self._db.table(table_name)
row_to_update = table_to_update.get(row_id=int(row_idx))
colval_to_update = {}
print('Updating DB.{0}.{1}: {2}'.format(table_name, row_idx, row_to_update))
for column_name, column_type in table_to_update.columns.items():
if column_type == 'fk':
current_fktable = row_to_update[column_name].table
print('Change FKTable from <{0}> to value='.format(current_fktable), end='')
after_fktable = input()
current_fkid = row_to_update[column_name].fk_id
print('Change FKId from <{0}> to value='.format(current_fkid), end='')
after_fkid = input()
colval_to_update[column_name] = {
'table': after_fktable,
'fkid': after_fkid
}
else:
print('Enter value for column {0}, type={1}: {0}='.format(column_name, column_type), end='')
column_value = input()
colval_to_update[column_name] = column_value
table_to_update.update(colval_to_update, [int(row_idx)])
@runtime_on_any_exception
def _delete(self, args):
table_name, row_id = args
print('Deleted item DB.{0}.{1}'.format(*args))
print(self._db.delete(table_name, row_ids=[int(row_id)]) or 'Not Found DB.{0}.{1}'.format(*args))
def _open(self):
"""Create DB instance and preload default models."""
self._db = DB(self._path)
products = self._db.table(
'Products',
columns={'name': 'str', 'price': 'int'}
)
orders = self._db.table(
'Orders',
columns={'product': 'fk', 'client': 'str', 'destination': 'addr'}
)
try:
products.insert_multiple([
{"name": ("product1", "str"), "price": ("50", "int")},
{"name": ("product2", "str"), "price": ("100", "int")},
{"name": ("product3", "str"), "price": ("200", "int")},
])
except:
pass
try:
orders.insert_multiple([
{
"product": ({'table': 'Products', 'fkid': '1'}, 'fk'),
"client": ("honchar", "str"), "destination": ("Kyiv", "addr")
},
{
"product": ({'table': 'Products', 'fkid': '2'}, 'fk'),
"client": ("honchar2", "str"), "destination": ("Kyiv2", "addr")
},
{
"product": ({'table': 'Products', 'fkid': '3'}, 'fk'),
"client": ("honchar3", "str"), "destination": ("Kyiv3", "addr")
},
])
except:
pass
self.run('help', *())
def _close(self, _):
"""Close DB instance routine."""
self._db.close()
def __enter__(self):
self._open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close(None)
| 2.515625 | 3 |
Ar_Script/past/eg_用户信息用户界面.py | archerckk/PyTest | 0 | 11112 | <gh_stars>0
import easygui as g
# judge=1
# def judge_null(tmp):
#     if tmp.isspace() or len(tmp)==0:
#         return judge==0
#
# while 1:
#     user_info=g.multenterbox(title='Account Center',
#                              msg='[*Username] is required\t[*Real name] is required\t[*Mobile number] is required\t[*E-mail] is required',
#                              fields=['*Username','*Real name','Landline','*Mobile number','QQ','*E-mail']
#                              )
#
#     if judge_null(user_info[0])==0:
#         g.msgbox(title='Notice',msg='The username you entered is empty')
#     elif judge_null(user_info[1])==0:
#         g.msgbox(title='Notice',msg='The real name you entered is empty')
#     elif judge_null(user_info[3])==0:
#         g.msgbox(title='Notice',msg='The mobile number you entered is empty')
#     elif judge_null(user_info[5])==0:
#         g.msgbox(title='Notice',msg='The E-mail you entered is empty')
#     else:
#         g.msgbox(title='Notice',msg='Congratulations, registration successful')
#         break
# Reference solution 2
title = 'User information form'
msg = 'Please fill in your user information truthfully'
field_list = ['*Username', '*Real name', 'Landline', '*Mobile number', 'QQ', '*E-mail']
field_value = []
field_value = g.multenterbox(msg, title, field_list)
while 1:
    if field_value == None:
        break
    err_msg = ''
    for i in range(len(field_list)):
        option = field_list[i].strip()
        if field_value[i].strip() == '' and option[0] == '*':
            err_msg += '[%s] is required\n\n' % (field_list[i])
    if err_msg == '':
        break
    field_value = g.multenterbox(err_msg, title, field_list, field_value)
print('The user information is as follows: ' + str(field_value))
| 2.453125 | 2 |
examples/pspm_pupil/model_defs.py | fmelinscak/cognibench | 3 | 11113 | from cognibench.models import CNBModel
from cognibench.capabilities import ContinuousAction, ContinuousObservation
from cognibench.continuous import ContinuousSpace
from cognibench.models.wrappers import MatlabWrapperMixin
class PsPMModel(MatlabWrapperMixin, CNBModel, ContinuousAction, ContinuousObservation):
name = "PsPM model"
def __init__(
self, *args, lib_paths, import_base_path, predict_fn, model_spec, **kwargs
):
self.set_action_space(ContinuousSpace())
self.set_observation_space(ContinuousSpace())
def pred(matlab_sess, stimuli):
stimuli_copy = dict(stimuli)
stimuli_copy.update(model_spec)
return matlab_sess.feval(predict_fn, stimuli_copy)
MatlabWrapperMixin.__init__(
self,
lib_paths=lib_paths,
import_base_path=import_base_path,
predict_fn=pred,
)
CNBModel.__init__(self, *args, **kwargs)
| 1.898438 | 2 |
core/layouts/pixel_list.py | TheGentlemanOctopus/oracle | 0 | 11114 | from layout import Layout
class PixelList(Layout):
"""
A simple generic layout, just a list of pixels
"""
def __init__(self, pixels):
"""
pixels is a list of pixel objects
"""
self.pixels = pixels
| 3.28125 | 3 |
test/programytest/clients/restful/test_config.py | minhdc/documented-programy | 0 | 11115 | import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.clients.restful.config import RestConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class RestConfigurationTests(unittest.TestCase):
def test_init(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
rest:
host: 127.0.0.1
port: 5000
debug: false
workers: 4
use_api_keys: false
api_key_file: apikeys.txt
""", ConsoleConfiguration(), ".")
rest_config = RestConfiguration("rest")
rest_config.load_configuration(yaml, ".")
self.assertEqual("127.0.0.1", rest_config.host)
self.assertEqual(5000, rest_config.port)
self.assertEqual(False, rest_config.debug)
self.assertEqual(False, rest_config.use_api_keys)
self.assertEqual("apikeys.txt", rest_config.api_key_file)
def test_init_no_values(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
rest:
""", ConsoleConfiguration(), ".")
rest_config = RestConfiguration("rest")
rest_config.load_configuration(yaml, ".")
self.assertEqual("0.0.0.0", rest_config.host)
self.assertEqual(80, rest_config.port)
self.assertEqual(False, rest_config.debug)
self.assertEqual(False, rest_config.use_api_keys)
def test_to_yaml_with_defaults(self):
config = RestConfiguration("rest")
data = {}
config.to_yaml(data, True)
self.assertEquals(data['host'], "0.0.0.0")
self.assertEquals(data['port'], 80)
self.assertEquals(data['debug'], False)
self.assertEquals(data['use_api_keys'], False)
self.assertEquals(data['api_key_file'], './api.keys')
self.assertEquals(data['ssl_cert_file'], './rsa.cert')
self.assertEquals(data['ssl_key_file'], './rsa.keys')
self.assertEquals(data['bot'], 'bot')
self.assertEquals(data['license_keys'], "./config/license.keys")
self.assertEquals(data['bot_selector'], "programy.clients.client.DefaultBotSelector")
self.assertEquals(data['renderer'], "programy.clients.render.text.TextRenderer")
| 2.46875 | 2 |
Assignments/06.py | zexhan17/Data-Structures-and-Algorithms-using-Python | 0 | 11116 | # Write a recursive function to count the number of nodes in a Tree. (first do your self then see code)
def count_nodes(self):
    count = 1
    left_count = 0
    right_count = 0
    if self.left:
        left_count = self.left.count_nodes()
    if self.right:
        right_count = self.right.count_nodes()
    return count + left_count + right_count

# Q # 2:
'''The height of a tree is the maximum number of levels in the tree. So, a tree with just one node has a height of 1. If the root has children which are leaves, the height of the tree is 2.
The height of a TreeNode can be computed recursively using a simple algorithm: the height of a TreeNode with no children is 1. If it has children the height is: max of the heights of its two sub-trees + 1.
Write a clean, recursive function for the TreeNode class that calculates the height based on the above statement (first do it yourself then see code) '''
def get_height(self):
    height = 1
    left_height = 0
    right_height = 0
    if self.left:
        left_height = self.left.get_height()
    if self.right:
        right_height = self.right.get_height()
    return height + max(left_height, right_height)

# Incomplete fragment: start of a binary-search-tree validity check
# print(self.val)
# if self.left.val > self.val or self.right.val < self.val:
#     return False
| 4.3125 | 4 |
flask_app.py | mdaeron/clumpycrunch | 0 | 11117 | <filename>flask_app.py
#! /usr/bin/env python3
# from datetime import datetime
# from random import choices
# from string import ascii_lowercase
from flask import Flask, request, render_template, Response, send_file
from flaskext.markdown import Markdown
from D47crunch import D47data, pretty_table, make_csv, smart_type
from D47crunch import __version__ as vD47crunch
import zipfile, io, time
from pylab import *
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import base64
from werkzeug.wsgi import FileWrapper
from matplotlib import rcParams
# rcParams['backend'] = 'Agg'
# rcParams['interactive'] = False
rcParams['font.family'] = 'Helvetica'
rcParams['font.sans-serif'] = 'Helvetica'
rcParams['font.size'] = 10
rcParams['mathtext.fontset'] = 'custom'
rcParams['mathtext.rm'] = 'sans'
rcParams['mathtext.bf'] = 'sans:bold'
rcParams['mathtext.it'] = 'sans:italic'
rcParams['mathtext.cal'] = 'sans:italic'
rcParams['mathtext.default'] = 'rm'
rcParams['xtick.major.size'] = 4
rcParams['xtick.major.width'] = 1
rcParams['ytick.major.size'] = 4
rcParams['ytick.major.width'] = 1
rcParams['axes.grid'] = False
rcParams['axes.linewidth'] = 1
rcParams['grid.linewidth'] = .75
rcParams['grid.linestyle'] = '-'
rcParams['grid.alpha'] = .15
rcParams['savefig.dpi'] = 150
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
__copyright__ = 'Copyright (c) 2020 <NAME>'
__license__ = 'Modified BSD License - https://opensource.org/licenses/BSD-3-Clause'
__date__ = '2020-04-22'
__version__ = '2.1.dev2'
rawdata_input_str = '''UID\tSession\tSample\td45\td46\td47\tNominal_d13C_VPDB\tNominal_d18O_VPDB
A01\tSession01\tETH-1\t5.795017\t11.627668\t16.893512\t2.02\t-2.19
A02\tSession01\tIAEA-C1\t6.219070\t11.491072\t17.277490
A03\tSession01\tETH-2\t-6.058681\t-4.817179\t-11.635064\t-10.17\t-18.69
A04\tSession01\tIAEA-C2\t-3.861839\t4.941839\t0.606117
A05\tSession01\tETH-3\t5.543654\t12.052277\t17.405548\t1.71\t-1.78
A06\tSession01\tMERCK\t-35.929352\t-2.087501\t-39.548484
A07\tSession01\tETH-4\t-6.222218\t-5.194170\t-11.944111
A08\tSession01\tETH-2\t-6.067055\t-4.877104\t-11.699265\t-10.17\t-18.69
A09\tSession01\tMERCK\t-35.930739\t-2.080798\t-39.545632
A10\tSession01\tETH-1\t5.788207\t11.559104\t16.801908\t2.02\t-2.19
A11\tSession01\tETH-4\t-6.217508\t-5.221407\t-11.987503
A12\tSession01\tIAEA-C2\t-3.876921\t4.868892\t0.521845
A13\tSession01\tETH-3\t5.539840\t12.013444\t17.368631\t1.71\t-1.78
A14\tSession01\tIAEA-C1\t6.219046\t11.447846\t17.234280
A15\tSession01\tMERCK\t-35.932060\t-2.088659\t-39.531627
A16\tSession01\tETH-3\t5.516658\t11.978320\t17.295740\t1.71\t-1.78
A17\tSession01\tETH-4\t-6.223370\t-5.253980\t-12.025298
A18\tSession01\tETH-2\t-6.069734\t-4.868368\t-11.688559\t-10.17\t-18.69
A19\tSession01\tIAEA-C1\t6.213642\t11.465109\t17.244547
A20\tSession01\tETH-1\t5.789982\t11.535603\t16.789811\t2.02\t-2.19
A21\tSession01\tETH-4\t-6.205703\t-5.144529\t-11.909160
A22\tSession01\tIAEA-C1\t6.212646\t11.406548\t17.187214
A23\tSession01\tETH-3\t5.531413\t11.976697\t17.332700\t1.71\t-1.78
A24\tSession01\tMERCK\t-35.926347\t-2.124579\t-39.582201
A25\tSession01\tETH-1\t5.786979\t11.527864\t16.775547\t2.02\t-2.19
A26\tSession01\tIAEA-C2\t-3.866505\t4.874630\t0.525332
A27\tSession01\tETH-2\t-6.076302\t-4.922424\t-11.753283\t-10.17\t-18.69
A28\tSession01\tIAEA-C2\t-3.878438\t4.818588\t0.467595
A29\tSession01\tETH-3\t5.546458\t12.133931\t17.501646\t1.71\t-1.78
A30\tSession01\tETH-1\t5.802916\t11.642685\t16.904286\t2.02\t-2.19
A31\tSession01\tETH-2\t-6.069274\t-4.847919\t-11.677722\t-10.17\t-18.69
A32\tSession01\tETH-3\t5.523018\t12.007363\t17.362080\t1.71\t-1.78
A33\tSession01\tETH-1\t5.802333\t11.616032\t16.884255\t2.02\t-2.19
A34\tSession01\tETH-3\t5.537375\t12.000263\t17.350856\t1.71\t-1.78
A35\tSession01\tETH-2\t-6.060713\t-4.893088\t-11.728465\t-10.17\t-18.69
A36\tSession01\tETH-3\t5.532342\t11.990022\t17.342273\t1.71\t-1.78
A37\tSession01\tETH-3\t5.533622\t11.980853\t17.342245\t1.71\t-1.78
A38\tSession01\tIAEA-C2\t-3.867587\t4.893554\t0.540404
A39\tSession01\tIAEA-C1\t6.201760\t11.406628\t17.189625
A40\tSession01\tETH-1\t5.802150\t11.563414\t16.836189\t2.02\t-2.19
A41\tSession01\tETH-2\t-6.068598\t-4.897545\t-11.722343\t-10.17\t-18.69
A42\tSession01\tMERCK\t-35.928359\t-2.098440\t-39.577150
A43\tSession01\tETH-4\t-6.219175\t-5.168031\t-11.936923
A44\tSession01\tIAEA-C2\t-3.871671\t4.871517\t0.518290
B01\tSession02\tETH-1\t5.800180\t11.640916\t16.939044\t2.02\t-2.19
B02\tSession02\tETH-1\t5.799584\t11.631297\t16.917656\t2.02\t-2.19
B03\tSession02\tIAEA-C1\t6.225135\t11.512637\t17.335876
B04\tSession02\tETH-2\t-6.030415\t-4.746444\t-11.525506\t-10.17\t-18.69
B05\tSession02\tIAEA-C2\t-3.837017\t4.992780\t0.675292
B06\tSession02\tETH-3\t5.536997\t12.048918\t17.420228\t1.71\t-1.78
B07\tSession02\tMERCK\t-35.928379\t-2.105615\t-39.594573
B08\tSession02\tETH-4\t-6.218801\t-5.185168\t-11.964407
B09\tSession02\tETH-2\t-6.068197\t-4.840037\t-11.686296\t-10.17\t-18.69
B10\tSession02\tMERCK\t-35.926951\t-2.071047\t-39.546767
B11\tSession02\tETH-1\t5.782634\t11.571818\t16.835185\t2.02\t-2.19
B12\tSession02\tETH-2\t-6.070168\t-4.877700\t-11.703876\t-10.17\t-18.69
B13\tSession02\tETH-4\t-6.214873\t-5.190550\t-11.967040
B14\tSession02\tIAEA-C2\t-3.853550\t4.919425\t0.584634
B15\tSession02\tETH-3\t5.522265\t12.011737\t17.368407\t1.71\t-1.78
B16\tSession02\tIAEA-C1\t6.219374\t11.447014\t17.264258
B17\tSession02\tMERCK\t-35.927733\t-2.103033\t-39.603494
B18\tSession02\tETH-3\t5.527002\t11.984062\t17.332660\t1.71\t-1.78
B19\tSession02\tIAEA-C2\t-3.850358\t4.889230\t0.562794
B20\tSession02\tETH-4\t-6.222398\t-5.263817\t-12.033650
B21\tSession02\tETH-3\t5.525478\t11.970096\t17.340498\t1.71\t-1.78
B22\tSession02\tETH-2\t-6.070129\t-4.941487\t-11.773824\t-10.17\t-18.69
B23\tSession02\tIAEA-C1\t6.217001\t11.434152\t17.232308
B24\tSession02\tETH-1\t5.793421\t11.533191\t16.810838\t2.02\t-2.19
B25\tSession02\tETH-4\t-6.217740\t-5.198048\t-11.977179
B26\tSession02\tIAEA-C1\t6.216912\t11.425200\t17.234224
B27\tSession02\tETH-3\t5.522238\t11.932174\t17.286903\t1.71\t-1.78
B28\tSession02\tMERCK\t-35.914404\t-2.133955\t-39.614612
B29\tSession02\tETH-1\t5.784156\t11.517244\t16.786548\t2.02\t-2.19
B30\tSession02\tIAEA-C2\t-3.852750\t4.884339\t0.551587
B31\tSession02\tETH-2\t-6.068631\t-4.924103\t-11.764507\t-10.17\t-18.69
B32\tSession02\tETH-4\t-6.220238\t-5.231375\t-12.009300
B33\tSession02\tIAEA-C2\t-3.855245\t4.866571\t0.534914
B34\tSession02\tETH-1\t5.788790\t11.544306\t16.809117\t2.02\t-2.19
B35\tSession02\tMERCK\t-35.935017\t-2.173682\t-39.664046
B36\tSession02\tETH-3\t5.518320\t11.955048\t17.300668\t1.71\t-1.78
B37\tSession02\tETH-1\t5.790564\t11.521174\t16.781304\t2.02\t-2.19
B38\tSession02\tETH-4\t-6.218809\t-5.205256\t-11.979998
B39\tSession02\tIAEA-C1\t6.204774\t11.391335\t17.181310
B40\tSession02\tETH-2\t-6.076424\t-4.967973\t-11.815466\t-10.17\t-18.69
C01\tSession03\tETH-3\t5.541868\t12.129615\t17.503738\t1.71\t-1.78
C02\tSession03\tETH-3\t5.534395\t12.034601\t17.391274\t1.71\t-1.78
C03\tSession03\tETH-1\t5.797568\t11.563575\t16.857871\t2.02\t-2.19
C04\tSession03\tETH-3\t5.529415\t11.969512\t17.342673\t1.71\t-1.78
C05\tSession03\tETH-1\t5.794026\t11.526540\t16.806934\t2.02\t-2.19
C06\tSession03\tETH-3\t5.527210\t11.937462\t17.294015\t1.71\t-1.78
C07\tSession03\tIAEA-C1\t6.220521\t11.430197\t17.242458
C08\tSession03\tETH-2\t-6.064061\t-4.900852\t-11.732976\t-10.17\t-18.69
C09\tSession03\tIAEA-C2\t-3.846482\t4.889242\t0.558395
C10\tSession03\tETH-1\t5.789644\t11.520663\t16.795837\t2.02\t-2.19
C11\tSession03\tETH-4\t-6.219385\t-5.258604\t-12.036476
C12\tSession03\tMERCK\t-35.936631\t-2.161769\t-39.693775
C13\tSession03\tETH-2\t-6.076357\t-4.939912\t-11.803553\t-10.17\t-18.69
C14\tSession03\tIAEA-C2\t-3.862518\t4.850015\t0.499777
C15\tSession03\tETH-3\t5.515822\t11.928316\t17.287739\t1.71\t-1.78
C16\tSession03\tETH-4\t-6.216625\t-5.252914\t-12.033781
C17\tSession03\tETH-1\t5.792540\t11.537788\t16.801906\t2.02\t-2.19
C18\tSession03\tIAEA-C1\t6.218853\t11.447394\t17.270859
C19\tSession03\tETH-2\t-6.070107\t-4.944520\t-11.806885\t-10.17\t-18.69
C20\tSession03\tMERCK\t-35.935001\t-2.155577\t-39.675070
C21\tSession03\tETH-3\t5.542309\t12.082338\t17.471951\t1.71\t-1.78
C22\tSession03\tETH-4\t-6.209017\t-5.137393\t-11.920935
C23\tSession03\tETH-1\t5.796781\t11.621197\t16.905496\t2.02\t-2.19
C24\tSession03\tMERCK\t-35.926449\t-2.053921\t-39.576918
C25\tSession03\tETH-2\t-6.057158\t-4.797641\t-11.644824\t-10.17\t-18.69
C26\tSession03\tIAEA-C1\t6.221982\t11.501725\t17.321709
C27\tSession03\tETH-3\t5.535162\t12.023486\t17.396560\t1.71\t-1.78
C28\tSession03\tIAEA-C2\t-3.836934\t4.984196\t0.665651
C29\tSession03\tETH-3\t5.531331\t11.991300\t17.353622\t1.71\t-1.78
C30\tSession03\tIAEA-C2\t-3.844008\t4.926554\t0.601156
C31\tSession03\tETH-2\t-6.063163\t-4.907454\t-11.765065\t-10.17\t-18.69
C32\tSession03\tMERCK\t-35.941566\t-2.163022\t-39.704731
C33\tSession03\tETH-3\t5.523894\t11.992718\t17.363902\t1.71\t-1.78
C34\tSession03\tIAEA-C1\t6.220801\t11.462090\t17.282153
C35\tSession03\tETH-1\t5.794369\t11.563017\t16.845673\t2.02\t-2.19
C36\tSession03\tETH-4\t-6.221257\t-5.272969\t-12.055444
C37\tSession03\tETH-3\t5.517832\t11.957180\t17.312487\t1.71\t-1.78
C38\tSession03\tETH-2\t-6.053330\t-4.909476\t-11.740852\t-10.17\t-18.69
C39\tSession03\tIAEA-C1\t6.217139\t11.440085\t17.244787
C40\tSession03\tETH-1\t5.794091\t11.541948\t16.826158\t2.02\t-2.19
C41\tSession03\tIAEA-C2\t-3.803466\t4.894953\t0.624184
C42\tSession03\tETH-3\t5.513788\t11.933062\t17.286883\t1.71\t-1.78
C43\tSession03\tETH-1\t5.793334\t11.569668\t16.844535\t2.02\t-2.19
C44\tSession03\tETH-2\t-6.064928\t-4.935031\t-11.786336\t-10.17\t-18.69
C45\tSession03\tETH-4\t-6.216796\t-5.300373\t-12.075033
C46\tSession03\tETH-3\t5.521772\t11.933713\t17.283775\t1.71\t-1.78
C47\tSession03\tMERCK\t-35.937762\t-2.181553\t-39.739636
D01\tSession04\tETH-4\t-6.218867\t-5.242334\t-12.032129
D02\tSession04\tIAEA-C1\t6.218458\t11.435622\t17.238776
D03\tSession04\tETH-3\t5.522006\t11.946540\t17.300601\t1.71\t-1.78
D04\tSession04\tMERCK\t-35.931765\t-2.175265\t-39.716152
D05\tSession04\tETH-1\t5.786884\t11.560397\t16.823187\t2.02\t-2.19
D06\tSession04\tIAEA-C2\t-3.846071\t4.861980\t0.534465
D07\tSession04\tETH-2\t-6.072653\t-4.917987\t-11.786215\t-10.17\t-18.69
D08\tSession04\tETH-3\t5.516592\t11.923729\t17.275641\t1.71\t-1.78
D09\tSession04\tETH-1\t5.789889\t11.531354\t16.804221\t2.02\t-2.19
D10\tSession04\tIAEA-C2\t-3.845074\t4.865635\t0.546284
D11\tSession04\tETH-1\t5.795006\t11.507829\t16.772751\t2.02\t-2.19
D12\tSession04\tETH-1\t5.791371\t11.540606\t16.822704\t2.02\t-2.19
D13\tSession04\tETH-2\t-6.074029\t-4.937379\t-11.786614\t-10.17\t-18.69
D14\tSession04\tETH-4\t-6.216977\t-5.273352\t-12.057294
D15\tSession04\tIAEA-C1\t6.214304\t11.412869\t17.227005
D16\tSession04\tETH-2\t-6.071021\t-4.966406\t-11.812116\t-10.17\t-18.69
D17\tSession04\tETH-3\t5.543181\t12.065648\t17.455042\t1.71\t-1.78
D18\tSession04\tETH-1\t5.805793\t11.632212\t16.937561\t2.02\t-2.19
D19\tSession04\tIAEA-C1\t6.230425\t11.518038\t17.342943
D20\tSession04\tETH-2\t-6.049292\t-4.811109\t-11.639895\t-10.17\t-18.69
D21\tSession04\tIAEA-C2\t-3.829436\t4.967992\t0.665451
D22\tSession04\tETH-3\t5.538827\t12.064780\t17.438156\t1.71\t-1.78
D23\tSession04\tMERCK\t-35.935604\t-2.092229\t-39.632228
D24\tSession04\tETH-4\t-6.215430\t-5.166894\t-11.939419
D25\tSession04\tETH-2\t-6.068214\t-4.868420\t-11.716099\t-10.17\t-18.69
D26\tSession04\tMERCK\t-35.918898\t-2.041585\t-39.566777
D27\tSession04\tETH-1\t5.786924\t11.584138\t16.861248\t2.02\t-2.19
D28\tSession04\tETH-2\t-6.062115\t-4.820423\t-11.664703\t-10.17\t-18.69
D29\tSession04\tETH-4\t-6.210819\t-5.160997\t-11.943417
D30\tSession04\tIAEA-C2\t-3.842542\t4.937635\t0.603831
D31\tSession04\tETH-3\t5.527648\t11.985083\t17.353603\t1.71\t-1.78
D32\tSession04\tIAEA-C1\t6.221429\t11.481788\t17.284825
D33\tSession04\tMERCK\t-35.922066\t-2.113682\t-39.642962
D34\tSession04\tETH-3\t5.521955\t11.989323\t17.345179\t1.71\t-1.78
D35\tSession04\tIAEA-C2\t-3.838229\t4.937180\t0.617586
D36\tSession04\tETH-4\t-6.215638\t-5.221584\t-11.999819
D37\tSession04\tETH-2\t-6.067508\t-4.893477\t-11.754488\t-10.17\t-18.69
D38\tSession04\tIAEA-C1\t6.214580\t11.440629\t17.254051'''
app = Flask(__name__)
Markdown(app, extensions = [
'markdown.extensions.tables',
# 'pymdownx.magiclink',
# 'pymdownx.betterem',
'pymdownx.highlight',
'pymdownx.tilde',
'pymdownx.caret',
# 'pymdownx.emoji',
# 'pymdownx.tasklist',
'pymdownx.superfences'
])
default_payload = {
'display_results': False,
'error_msg': '',
'rawdata_input_str': rawdata_input_str,
'o17_R13_VPDB': 0.01118,
'o17_R18_VSMOW': 0.0020052,
'o17_R17_VSMOW': 0.00038475,
'o17_lambda': 0.528,
'd13C_stdz_setting': 'd13C_stdz_setting_2pt',
'd18O_stdz_setting': 'd18O_stdz_setting_2pt',
'wg_setting': 'wg_setting_fromsamples',
# 'wg_setting_fromsample_samplename': 'ETH-3',
# 'wg_setting_fromsample_d13C': 1.71,
# 'wg_setting_fromsample_d18O': -1.78,
'acidfrac_setting': 1.008129,
'rf_input_str': '0.258\tETH-1\n0.256\tETH-2\n0.691\tETH-3',
'stdz_method_setting': 'stdz_method_setting_pooled',
}
@app.route('/faq/')
def faq():
with open(f'{app.root_path}/faq.md') as fid:
md = fid.read()
return render_template('faq.html', md = md, vD47crunch = vD47crunch)
@app.route('/readme/')
def readme():
with open(f'{app.root_path}/README.md') as fid:
md = fid.read()
headless_md = md[md.find('\n'):]
return render_template('readme.html', md = headless_md, vD47crunch = vD47crunch)
@app.route('/', methods = ['GET', 'POST'])
def main():
if request.method == 'GET':
return start()
else:
if request.form['action'] == 'Process':
return proceed()
elif request.form['action'] == 'Download zipped results':
return zipresults()
def start():
payload = default_payload.copy()
# payload['token'] = datetime.now().strftime('%y%m%d') + ''.join(choices(ascii_lowercase, k=5))
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
def proceed():
payload = dict(request.form)
data = D47data()
if payload['d13C_stdz_setting'] == 'd13C_stdz_setting_2pt':
data.d13C_STANDARDIZATION_METHOD = '2pt'
elif payload['d13C_stdz_setting'] == 'd13C_stdz_setting_1pt':
data.d13C_STANDARDIZATION_METHOD = '1pt'
elif payload['d13C_stdz_setting'] == 'd13C_stdz_setting_none':
data.d13C_STANDARDIZATION_METHOD = 'none'
if payload['d18O_stdz_setting'] == 'd18O_stdz_setting_2pt':
data.d18O_STANDARDIZATION_METHOD = '2pt'
elif payload['d18O_stdz_setting'] == 'd18O_stdz_setting_1pt':
data.d18O_STANDARDIZATION_METHOD = '1pt'
elif payload['d18O_stdz_setting'] == 'd18O_stdz_setting_none':
data.d18O_STANDARDIZATION_METHOD = 'none'
anchors = [l.split('\t') for l in payload['rf_input_str'].splitlines() if '\t' in l]
data.Nominal_D47 = {l[1]: float(l[0]) for l in anchors}
try:
data.R13_VPDB = float(payload['o17_R13_VPDB'])
except:
payload['error_msg'] = 'Check the value of R13_VPDB in oxygen-17 correction settings.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
try:
data.R18_VSMOW = float(payload['o17_R18_VSMOW'])
except:
payload['error_msg'] = 'Check the value of R18_VSMOW in oxygen-17 correction settings.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
try:
data.R17_VSMOW = float(payload['o17_R17_VSMOW'])
except:
payload['error_msg'] = 'Check the value of R17_VSMOW in oxygen-17 correction settings.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
try:
data.lambda_17 = float(payload['o17_lambda'])
except:
payload['error_msg'] = 'Check the value of λ in oxygen-17 correction settings.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
data.input(payload['rawdata_input_str'])
# try:
# data.input(payload['rawdata_input_str'], '\t')
# except:
# payload['error_msg'] = 'Raw data input failed for some reason.'
# return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
for r in data:
for k in ['UID', 'Sample', 'Session', 'd45', 'd46', 'd47']:
if k not in r or r[k] == '':
payload['error_msg'] = f'Analysis "{r["UID"]}" is missing field "{k}".'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
for k in ['d45', 'd46', 'd47']:
if not isinstance(r[k], (int, float)):
payload['error_msg'] = f'Analysis "{r["UID"]}" should have a valid number for field "{k}".'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
if payload['wg_setting'] == 'wg_setting_fromsamples':
# if payload['wg_setting_fromsample_samplename'] == '':
# payload['error_msg'] = 'Empty sample name in WG settings.'
# return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
#
# wg_setting_fromsample_samplename = payload['wg_setting_fromsample_samplename']
#
# for s in data.sessions:
# if wg_setting_fromsample_samplename not in [r['Sample'] for r in data.sessions[s]['data']]:
# payload['error_msg'] = f'Sample name from WG settings ("{wg_setting_fromsample_samplename}") not found in session "{s}".'
# return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
#
# try:
# wg_setting_fromsample_d13C = float(payload['wg_setting_fromsample_d13C'])
# except:
# payload['error_msg'] = 'Check the δ13C value in WG settings.'
# return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
#
# try:
# wg_setting_fromsample_d18O = float(payload['wg_setting_fromsample_d18O'])
# except:
# payload['error_msg'] = 'Check the δ18O value in WG settings.'
# return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
try:
acidfrac = float(payload['acidfrac_setting'])
except:
payload['error_msg'] = 'Check the acid fractionation value.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
if acidfrac == 0:
payload['error_msg'] = 'Acid fractionation value should be greater than zero.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
if payload['wg_setting'] == 'wg_setting_fromsamples':
data.Nominal_d13C_VPDB = {}
data.Nominal_d18O_VPDB = {}
for r in data:
if 'Nominal_d13C_VPDB' in r:
if r['Sample'] in data.Nominal_d13C_VPDB:
if data.Nominal_d13C_VPDB[r['Sample']] != r['Nominal_d13C_VPDB']:
payload['error_msg'] = f"Inconsistent <span class='field'>Nominal_d13C_VPDB</span> value for {r['Sample']} (analysis: {r['UID']})."
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
else:
data.Nominal_d13C_VPDB[r['Sample']] = r['Nominal_d13C_VPDB']
if 'Nominal_d18O_VPDB' in r:
if r['Sample'] in data.Nominal_d18O_VPDB:
if data.Nominal_d18O_VPDB[r['Sample']] != r['Nominal_d18O_VPDB']:
payload['error_msg'] = f"Inconsistent <span class='field'>Nominal_d18O_VPDB</span> value for {r['Sample']} (analysis {r['UID']})."
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
else:
data.Nominal_d18O_VPDB[r['Sample']] = r['Nominal_d18O_VPDB']
try:
data.wg(a18_acid = acidfrac)
except:
payload['error_msg'] = 'WG computation failed for some reason.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
if payload['wg_setting'] == 'wg_setting_explicit':
for r in data:
for k in ['d13Cwg_VPDB', 'd18Owg_VSMOW']:
if k not in r:
payload['error_msg'] = f'Analysis "{r["UID"]}" is missing field "{k}".'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
try:
data.crunch()
except:
payload['error_msg'] = 'Crunching step failed for some reason.'
return render_template('main.html', payload = payload, vD47crunch = vD47crunch)
method = {
'stdz_method_setting_pooled': 'pooled',
'stdz_method_setting_indep_sessions': 'indep_sessions',
}[payload['stdz_method_setting']]
data.standardize(
consolidate_tables = False,
consolidate_plots = False,
method = method)
csv = 'Session,a,b,c,va,vb,vc,covab,covac,covbc,Xa,Ya,Xu,Yu'
for session in data.sessions:
s = data.sessions[session]
Ga = [r for r in s['data'] if r['Sample'] in data.anchors]
Gu = [r for r in s['data'] if r['Sample'] in data.unknowns]
csv += f"\n{session},{s['a']},{s['b']},{s['c']},{s['CM'][0,0]},{s['CM'][1,1]},{s['CM'][2,2]},{s['CM'][0,1]},{s['CM'][0,2]},{s['CM'][1,2]},{';'.join([str(r['d47']) for r in Ga])},{';'.join([str(r['D47']) for r in Ga])},{';'.join([str(r['d47']) for r in Gu])},{';'.join([str(r['D47']) for r in Gu])}"
# payload['error_msg'] = 'Foo bar.'
# return str(payload).replace(', ','\n')
payload['display_results'] = True
payload['csv_of_sessions'] = csv
summary = data.summary(save_to_file = False, print_out = False)
tosessions = data.table_of_sessions(save_to_file = False, print_out = False)
payload['summary'] = pretty_table(summary, header = 0)
payload['summary_rows'] = len(payload['summary'].splitlines())+2
payload['summary_cols'] = len(payload['summary'].splitlines()[0])
payload['table_of_sessions'] = pretty_table(tosessions)
payload['table_of_sessions_rows'] = len(payload['table_of_sessions'].splitlines())+1
payload['table_of_sessions_cols'] = len(payload['table_of_sessions'].splitlines()[0])
payload['table_of_sessions_csv'] = make_csv(tosessions)
tosamples = data.table_of_samples(save_to_file = False, print_out = False)
payload['table_of_samples'] = pretty_table(tosamples)
payload['table_of_samples'] = payload['table_of_samples'][:] + 'NB: d18O_VSMOW is the composition of the analyzed CO2.'
payload['table_of_samples_rows'] = len(payload['table_of_samples'].splitlines())
payload['table_of_samples_cols'] = len(payload['table_of_samples'].splitlines()[0])+1
payload['table_of_samples_csv'] = make_csv(tosamples)
toanalyses = data.table_of_analyses(save_to_file = False, print_out = False)
payload['table_of_analyses'] = pretty_table(toanalyses)
payload['table_of_analyses_rows'] = len(payload['table_of_analyses'].splitlines())+1
payload['table_of_analyses_cols'] = len(payload['table_of_analyses'].splitlines()[0])
payload['table_of_analyses_csv'] = make_csv(toanalyses)
covars = "\n\nCOVARIANCE BETWEEN SAMPLE Δ47 VALUES:\n\n"
txt = [['Sample #1', 'Sample #2', 'Covariance', 'Correlation']]
unknowns = [k for k in data.unknowns]
for k, s1 in enumerate(unknowns):
for s2 in unknowns[k+1:]:
txt += [[
s1,
s2,
f"{data.sample_D47_covar(s1,s2):.4e}",
f"{data.sample_D47_covar(s1,s2)/data.samples[s1]['SE_D47']/data.samples[s2]['SE_D47']:.6f}",
]]
covars += pretty_table(txt, align = '<<>>')
payload['report'] = f"Report generated on {time.asctime()}\nClumpyCrunch v{__version__} using D47crunch v{vD47crunch}"
payload['report'] += "\n\nOXYGEN-17 CORRECTION PARAMETERS:\n" + pretty_table([['R13_VPDB', 'R18_VSMOW', 'R17_VSMOW', 'lambda_17'], [payload['o17_R13_VPDB'], payload['o17_R18_VSMOW'], payload['o17_R17_VSMOW'], payload['o17_lambda']]], align = '<<<<')
	# Disabled: the WG-from-sample inputs referenced below are commented out above.
	# if payload['wg_setting'] == 'wg_setting_fromsample':
	# 	payload['report'] += f"\n\nWG compositions constrained by sample {wg_setting_fromsample_samplename} with:"
	# 	payload['report'] += f"\n	δ13C_VPDB = {wg_setting_fromsample_d13C}"
	# 	payload['report'] += f"\n	δ18O_VPDB = {wg_setting_fromsample_d18O}"
	# 	payload['report'] += f"\n(18O/16O) AFF = {wg_setting_fromsample_acidfrac}\n"
	if payload['wg_setting'] == 'wg_setting_explicit':
payload['report'] += f"\n\nWG compositions specified by user.\n"
payload['report'] += f"\n\nSUMMARY:\n{payload['summary']}"
payload['report'] += f"\n\nSAMPLES:\n{payload['table_of_samples']}\n"
payload['report'] += f"\n\nSESSIONS:\n{payload['table_of_sessions']}"
payload['report'] += f"\n\nANALYSES:\n{payload['table_of_analyses']}"
payload['report'] += covars
txt = payload['csv_of_sessions']
txt = [[x.strip() for x in l.split(',')] for l in txt.splitlines() if l.strip()]
sessions = [{k: smart_type(v) for k,v in zip(txt[0], l)} for l in txt[1:]]
payload['plots'] = []
for s in sessions:
s['Xa'] = [float(x) for x in s['Xa'].split(';')]
s['Ya'] = [float(x) for x in s['Ya'].split(';')]
s['Xu'] = [float(x) for x in s['Xu'].split(';')]
s['Yu'] = [float(x) for x in s['Yu'].split(';')]
for s in sessions:
fig = figure(figsize = (3,3))
subplots_adjust(.2,.15,.95,.9)
plot_session(s)
pngImage = io.BytesIO()
FigureCanvas(fig).print_png(pngImage)
pngImageB64String = "data:image/png;base64,"
pngImageB64String += base64.b64encode(pngImage.getvalue()).decode('utf8')
payload['plots'] += [pngImageB64String]
close(fig)
return(render_template('main.html', payload = payload, vD47crunch = vD47crunch))
# @app.route("/csv/<foo>/<filename>", methods = ['POST'])
# def get_file(foo, filename):
# payload = dict(request.form)
# return Response(
# payload[foo],
# mimetype='text/plain',
# headers={'Content-Disposition': f'attachment;filename="{filename}"'}
# )
def normalization_error(a, b, c, CM, d47, D47):
V = array([-D47, -d47, -1]) /a
return float((V @ CM @ V.T) ** .5)
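# Example with hypothetical session values: CM is the 3x3 covariance matrix of
# the standardization parameters (a, b, c), and the call below propagates it
# into a 1-sigma standardization error at (d47, D47) = (10.0, 0.25):
#   CM = diag([1e-6, 1e-8, 1e-8])
#   normalization_error(1.0, 1e-3, -0.9, CM, 10.0, 0.25)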
def zipresults():
payload = dict(request.form)
# return str(payload).replace(', ','\n')
mem = io.BytesIO()
with zipfile.ZipFile(mem, 'w') as zf:
for k, filename in [
('report', 'report.txt'),
('table_of_sessions_csv', 'csv/sessions.csv'),
('table_of_samples_csv', 'csv/samples.csv'),
('table_of_analyses_csv', 'csv/analyses.csv'),
]:
data = zipfile.ZipInfo(f'/{filename}')
data.date_time = time.localtime(time.time())[:6]
data.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(data, payload[k])
txt = payload['csv_of_sessions']
txt = [[x.strip() for x in l.split(',')] for l in txt.splitlines() if l.strip()]
sessions = [{k: smart_type(v) for k,v in zip(txt[0], l)} for l in txt[1:]]
for s in sessions:
s['Xa'] = [float(x) for x in s['Xa'].split(';')]
s['Ya'] = [float(x) for x in s['Ya'].split(';')]
s['Xu'] = [float(x) for x in s['Xu'].split(';')]
s['Yu'] = [float(x) for x in s['Yu'].split(';')]
X = [x for s in sessions for k in ['Xa', 'Xu'] for x in s[k]]
Y = [y for s in sessions for k in ['Ya', 'Yu'] for y in s[k]]
xmin, xmax, ymin, ymax = [min(X), max(X), min(Y), max(Y)]
dx = xmax - xmin
dy = ymax - ymin
xmin -= dx/20
xmax += dx/20
ymin -= dy/20
ymax += dy/20
for s in sessions:
fig = figure(figsize = (5,5))
subplots_adjust(.15,.15,.9,.9)
plot_session(s, [xmin, xmax, ymin, ymax])
buf = io.BytesIO()
savefig(buf, format = 'pdf')
close(fig)
zf.writestr(f"/sessions/{s['Session']}.pdf", buf.getvalue())
mem.seek(0)
response = Response(FileWrapper(mem), mimetype="application/zip", direct_passthrough=True)
response.headers['Content-Disposition'] = 'attachment; filename=ClumpyCrunch.zip'
return response
def plot_session(s, axislimits = []):
kw = dict(mfc = 'None', mec = (.9,0,0), mew = .75, ms = 4)
plot(s['Xa'], s['Ya'], 'x', **kw)
kw['mec'] = 'k'
plot(s['Xu'], s['Yu'], 'x', **kw)
if axislimits:
xmin, xmax, ymin, ymax = axislimits
else:
xmin, xmax, ymin, ymax = axis()
XI,YI = meshgrid(linspace(xmin, xmax), linspace(ymin, ymax))
CM = array([[s['va'], s['covab'], s['covac']], [s['covab'], s['vb'], s['covbc']], [s['covac'], s['covbc'], s['vc']]])
a, b, c = s['a'], s['b'], s['c']
SI = array([[normalization_error(a, b, c, CM, xi, yi) for xi in XI[0,:]] for yi in YI[:,0]])
rng = SI.max() - SI.min()
if rng <= 0.01:
cinterval = 0.001
elif rng <= 0.03:
cinterval = 0.004
elif rng <= 0.1:
cinterval = 0.01
elif rng <= 0.3:
cinterval = 0.03
else:
cinterval = 0.1
cval = [ceil(SI.min() / .001) * .001 + k * cinterval for k in range(int(ceil((SI.max() - SI.min()) / cinterval)))]
cs = contour(XI, YI, SI, cval, colors = 'r', alpha = .5, linewidths = .75)
clabel(cs)
axis([xmin, xmax, ymin, ymax])
xlabel('δ$_{47}$ (‰ WG)')
ylabel('Δ$_{47}$ (‰)')
title(s['Session'])
grid(alpha = .15)
| 2.15625 | 2 |
rubric_sampling/experiments/train_rnn.py | YangAzure/rubric-sampling-public | 20 | 11118 | r"""Train a neural network to predict feedback for a program string."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import random
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
from .models import ProgramRNN
from .utils import AverageMeter, save_checkpoint, merge_args_with_dict
from .datasets import load_dataset
from .config import default_hyperparams
from .rubric_utils.load_params import get_label_params, get_max_seq_len
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, help='annotated|synthetic')
parser.add_argument('problem_id', type=int, help='1|2|3|4|5|6|7|8')
parser.add_argument('out_dir', type=str, help='where to save outputs')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
merge_args_with_dict(args, default_hyperparams)
device = torch.device('cuda' if args.cuda else 'cpu')
args.max_seq_len = get_max_seq_len(args.problem_id)
label_dim, _, _, _, _ = get_label_params(args.problem_id)
# reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
train_dataset = load_dataset( args.dataset, args.problem_id, 'train', vocab=None,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
val_dataset = load_dataset( args.dataset, args.problem_id, 'val', vocab=train_dataset.vocab,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
test_dataset = load_dataset(args.dataset, args.problem_id, 'test', vocab=train_dataset.vocab,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
model = ProgramRNN( args.z_dim, label_dim, train_dataset.vocab_size, embedding_dim=args.embedding_dim,
hidden_dim=args.hidden_dim, num_layers=args.num_layers)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def train(epoch):
model.train()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
for batch_idx, (seq, length, label, _) in enumerate(train_loader):
assert label is not None
batch_size = len(seq)
seq = seq.to(device)
length = length.to(device)
label = label.to(device)
optimizer.zero_grad()
label_out = model(seq, length)
loss = F.binary_cross_entropy(label_out, label)
loss.backward()
loss_meter.update(loss.item(), batch_size)
optimizer.step()
            acc = np.mean(torch.round(label_out).detach().cpu().numpy() == label.detach().cpu().numpy())
acc_meter.update(acc, batch_size)
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.4f}'.format(
epoch, batch_idx * batch_size, len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss_meter.avg,
acc_meter.avg))
print('====> Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format(
epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
def test(epoch, loader, name='Test'):
model.eval()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
with torch.no_grad():
with tqdm(total=len(loader)) as pbar:
for (seq, length, label, _) in loader:
assert label is not None
batch_size = len(seq)
seq = seq.to(device)
length = length.to(device)
label = label.to(device)
label_out = model(seq, length)
loss = F.binary_cross_entropy(label_out, label)
loss_meter.update(loss.item(), batch_size)
acc = np.mean(torch.round(label_out.cpu()).numpy() == label.cpu().numpy())
acc_meter.update(acc, batch_size)
pbar.update()
print('====> {} Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format(
name, epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
    best_loss = sys.maxsize
track_train_loss = np.zeros(args.epochs)
track_val_loss = np.zeros(args.epochs)
track_test_loss = np.zeros(args.epochs)
track_train_acc = np.zeros(args.epochs)
track_val_acc = np.zeros(args.epochs)
track_test_acc = np.zeros(args.epochs)
    for epoch in range(1, args.epochs + 1):
train_loss, train_acc = train(epoch)
val_loss, val_acc = test(epoch, val_loader, name='Val')
test_loss, test_acc = test(epoch, test_loader, name='Test')
track_train_loss[epoch - 1] = train_loss
track_val_loss[epoch - 1] = val_loss
track_test_loss[epoch - 1] = test_loss
track_train_acc[epoch - 1] = train_acc
track_val_acc[epoch - 1] = val_acc
track_test_acc[epoch - 1] = test_acc
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
save_checkpoint({
'state_dict': model.state_dict(),
'cmd_line_args': args,
'vocab': train_dataset.vocab,
}, is_best, folder=args.out_dir)
np.save(os.path.join(args.out_dir, 'train_loss.npy'), track_train_loss)
np.save(os.path.join(args.out_dir, 'val_loss.npy'), track_val_loss)
np.save(os.path.join(args.out_dir, 'test_loss.npy'), track_test_loss)
np.save(os.path.join(args.out_dir, 'train_acc.npy'), track_train_acc)
np.save(os.path.join(args.out_dir, 'val_acc.npy'), track_val_acc)
np.save(os.path.join(args.out_dir, 'test_acc.npy'), track_test_acc)
| 2.34375 | 2 |
python/code.py | Warabhi/ga-learner-dsmp-repo | 0 | 11119 | <filename>python/code.py
# --------------
# Code starts here
class_1 = ['<NAME>' , '<NAME>' , '<NAME>' , '<NAME>']
class_2 = ['<NAME>' , '<NAME>' , '<NAME>']
new_class = class_1 + class_2
print(new_class)
new_class.append('<NAME>')
print(new_class)
del new_class[5]
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {'Math': 65 , 'English': 70 , 'History': 80 , 'French': 70 , 'Science': 60}
total = sum(courses.values())
print(total)
percentage = total/500*100
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = { '<NAME>' : 78, '<NAME>' : 95, '<NAME>' : 65 ,
'<NAME>' : 50 , '<NAME>' : 70 , '<NAME>' : 66 , '<NAME>' : 75}
max_marks_scored = max(mathematics, key=mathematics.get)
print(max_marks_scored)
topper = max_marks_scored
print(topper)
# Code ends here
# --------------
# Given string
topper = ' andrew ng'
# Code starts here
first_name = topper.split()[0]
print(first_name)
last_name = topper.split()[1]
print(last_name)
full_name = last_name +' '+ first_name
print(full_name)
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| 3.9375 | 4 |
checklog.py | mtibbett67/pymodules | 0 | 11120 | <reponame>mtibbett67/pymodules<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
NAME:
checklog.py
DESCRIPTION:
This script checks the tail of the log file and lists the disk space
CREATED:
Sun Mar 15 22:53:54 2015
VERSION:
1.0
AUTHOR:
<NAME>
AUTHOR_EMAIL:
<EMAIL>
URL:
N/A
DOWNLOAD_URL:
N/A
INSTALL_REQUIRES:
[]
PACKAGES:
[]
SCRIPTS:
[]
'''
# Standard library imports
import os
import sys
import subprocess
# Related third party imports
# Local application/library specific imports
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
# Section formats
SEPARATOR = B + '=' * 80 + W
NL = '\n'
# Clear the terminal
os.system('clear')
# Check for root or sudo. Remove if not needed.
UID = os.getuid()
if UID != 0:
print R + ' [!]' + O + ' ERROR:' + G + ' sysupdate' + O + \
' must be run as ' + R + 'root' + W
# print R + ' [!]' + O + ' login as root (' + W + 'su root' + O + ') \
# or try ' + W + 'sudo ./wifite.py' + W
os.execvp('sudo', ['sudo'] + sys.argv)
else:
print NL
print G + 'You are running this script as ' + R + 'root' + W
print NL + SEPARATOR + NL
LOG = ['tail', '/var/log/messages']
DISK = ['df', '-h']
def check(arg1, arg2):
'''Call subprocess to check logs'''
print G + arg1 + W + NL
item = subprocess.check_output(arg2)
#subprocess.call(arg2)
print item + NL + SEPARATOR + NL
check('Running tail on messages', LOG)
check('Disk usage', DISK)
| 2.765625 | 3 |
algorithms/implementation/minimum_distances.py | avenet/hackerrank | 0 | 11121 | <filename>algorithms/implementation/minimum_distances.py
n = int(input().strip())
items = [
int(A_temp)
for A_temp
in input().strip().split(' ')
]
items_map = {}
result = None
for i, item in enumerate(items):
if item not in items_map:
items_map[item] = [i]
else:
items_map[item].append(i)
for _, item_indexes in items_map.items():
items_indexes_length = len(item_indexes)
if items_indexes_length > 1:
for i in range(items_indexes_length):
for j in range(i + 1, items_indexes_length):
diff = item_indexes[j] - item_indexes[i]
if result is None:
result = diff
elif diff < result:
result = diff
print(result if result else -1)
| 3.5625 | 4 |
spore/spore.py | pavankkota/SPoRe | 1 | 11122 | """
Sparse Poisson Recovery (SPoRe) module for solving Multiple Measurement Vector
problem with Poisson signals (MMVP) by batch stochastic gradient ascent and
Monte Carlo integration
Authors: <NAME>, <NAME>
Reference:
[1] <NAME>, <NAME>, <NAME>, and <NAME>, "Extreme Compressed
Sensing of Poisson Rates from Multiple Measurements," Mar. 2021.
arXiv ID:
"""
from abc import ABC, abstractmethod
import numpy as np
import time
import pdb
from .mmv_models import FwdModelGroup, SPoReFwdModelGroup
class SPoRe(object):
def __init__(self, N, fwdmodel, sampler, batch_size=100, step_size=1e-1,
min_lambda=1e-3, pyx_min=0, grad_scale=5e-2, conv_rel=1e-2, conv_window=500,
patience = 3000, step_cut = 0.1, max_cut = 5, max_iter=int(1e4)):
"""
Parameters
----------
N: int
Dimension of signals
fwdmodel : object
instance of a mmv_models.FwdModel class. Object should contain any necessary
model-specific parameters as attributes
sampler : object
instance of a spore.Sampler class that has a .sample method returning S samples
of signals X from a probability distribution (N, S, :)
batch_size: int
Number of columns of Y to randomly draw and evaluate for each iteration
step_size: float
initial learning rate for stochastic gradient ascent
min_lambda: float
Lower bound on individual entries of lambda. \epsilon in [1]
pyx_min: float (default 0, i.e. no effect)
A batch element y_b is only included in analysis if max(p(y_b|x_s))
among sampled x's (x_s) is greater than this value. Prevents steps
in the direction of junk measurements (e.g. a corrupted siganl) OR
if samples are not good for the y_b
[1] used 0 for all experiments
grad_scale: float
Maximum l2-norm of gradient step that can be taken. Any step larger
is rescaled to have this l2-norm
conv_rel: float (0,1)
Fractional change in the average of lambda estimate in two conv_windows,
below which iteration stops
conv_window: int
Number of iterations over which to evaluate moving averages. Nonoverlapping windows
are compared. E.g. if conv_window = 500, then 999-500 iterations ago is averaged
and compared to 499-current average.
patience: int
Number of iterations to wait for improvement in log likelihood before
cutting step size
step_cut: float (0, 1)
Fraction to cut step size by if patience exceeded
max_cut: int
Maximum number of times step size can be cut by step_cut before
quitting
max_iter: int
Maximum iteration budget. SPoRe terminates regardless of convergence status
"""
self.N = N
if isinstance(fwdmodel, FwdModelGroup):
self.fwdmodel_group = fwdmodel
else:
self.fwdmodel_group = FwdModelGroup([fwdmodel])
self.sampler = sampler
self.batch_size = batch_size
self.step_size = step_size
self.min_lambda = min_lambda
self.pyx_min = pyx_min
self.grad_scale = grad_scale
self.conv_rel = conv_rel
self.conv_window = conv_window
self.patience = patience
self.step_cut = step_cut
self.max_cut = max_cut
self.max_iter = max_iter
def recover(self, Y, S, lam0=None, randinit_offset=1e-1, seed=None, verbose=True):
"""Recover poisson rate parameters given
Parameters
----------
Y : array_like
Observations.
Shape ``(M, D)``.
S : int
Number of samples to draw for each Y.
lam0: array_like
Initial value for estimated lambda. If None, lam0 = randinit_offset
Shape: ``(N,)
randinit_offset: float
Random initializations (if lam0 not provided) are drawn.
Offset sets a minimum value for any particular entry of lambda0
seed: int or None
Initial seed for before iterations begin
verbose: boolean
If True, prints some information every <self.conv_window> iterations
Returns
-------
lam_S : numpy array
Recovered estimate of lambda
Shape ``(N,)``
includeCheck: numpy array
Indices of observations that never influenced a gradient step. These
observations can be considered 'unexplained' by the recovered lambda.
Can be indicative of a corrupted measurement.
Not used in [1]
lamHistory: numpy array
History of lambda estimates at each iteration
Shape ``(N, iters)`` (for iters evaluated until convergence)
llHistory: numpy array
History of median log-likelihood estimates at each iteration
Shape ``(iters,)``
"""
if isinstance(self.fwdmodel_group, SPoReFwdModelGroup):
fwdmodel = self.fwdmodel_group
else:
_, D = Y.shape
group_indices = None
fwdmodel = SPoReFwdModelGroup(self.fwdmodel_group, group_indices)
M, D = np.shape(Y)
np.random.seed(seed)
lamHistory = np.zeros((self.N, self.max_iter))
llHistory = np.zeros((self.max_iter))
if lam0 is None:
lam0 = np.ones(self.N)*randinit_offset
lamHat = lam0
# Remaining false elements at convergence => unexplained measurements. Not used in [1]
includeCheck = np.zeros(D) > np.ones(D)
refIter = 0
bestIter = 0
stepTemp = self.step_size
numCut = 0
t0 = time.time()
stepIter = []
# Batch gradient ascent
for i in range(self.max_iter):
# Get batch elements and sample for each
batchInds = np.random.choice(D, self.batch_size)
Y_batch = Y[:,batchInds]
self.sampler._lam = lamHat
X_sample = self.sampler.sample(Y_batch, S)
pyx = fwdmodel.py_x_batch(Y_batch[:, None, :], X_sample, batchInds) # (S, B) array
# Don't eval batch elements whose p(y|x) is too low for all samples. In [1] (self.pyx_min=0)
batchInclude = np.max(pyx, axis=0) > self.pyx_min
includeCheck[batchInds[batchInclude]] = True
pyx = pyx[:, batchInclude]
if np.shape(X_sample)[2] > 1:
X_sample = X_sample[:,:,batchInclude]
pqRatio = self.sampler.pq_ratio(X_sample)
probsAgg = pyx * pqRatio # (S, B) array, aggregate value of pdf computations
# Evaluate loss and gradient
llHistory[i] = self.log_likelihood(probsAgg)
grad = self.gradient(X_sample, lamHat, probsAgg)
step = stepTemp * grad
# Necessary to make more robust against numerical issue described in [1]
if not np.all(grad==np.zeros(self.N)): # at least some sampled X informs a gradient step
stepIter.append(i) # track when steps are taken
if np.any( (lamHat+step) >self.min_lambda): #if at least one index is stepped meaningfully
# Rescale according to the indices still in question
normCheck = np.linalg.norm(step[ (lamHat+step) >self.min_lambda])
if normCheck > self.grad_scale :
step = (self.grad_scale / normCheck) * step
else: # step is likely too big, period.
if np.linalg.norm(step) > self.grad_scale : # Rescale based on whole step vector
step = (self.grad_scale / np.linalg.norm(step)) * step
#if steps have been taken at least 1/2 the time, recent conv_window worth of iterations likely to have been taken
# hypothesize that steps may not be taken occasionally at first as lamHat is a bad estimate, but will be taken with increasing regularity
enoughSteps = np.sum(np.array(stepIter) > (i - self.conv_window*2)) > self.conv_window
lamHat += step
lamHat[lamHat < self.min_lambda] = self.min_lambda
lamHistory[:, i] = lamHat
# Check convergence
if (i+1) >= (self.conv_window*2):
lam1 = np.mean(lamHistory[:, (i-2*self.conv_window+1):(i-self.conv_window+1)], axis=1) # e.g [:, 0:500] if conv_window is 500
lam2 = np.mean(lamHistory[:, (i-self.conv_window+1):(i+1)], axis=1) # e.g. [:, 500:] if i is 999, conv_window is 500
pctChange = np.linalg.norm(lam2 - lam1, ord=1) / np.linalg.norm(lam1, ord=1)
if pctChange < self.conv_rel and enoughSteps:
break
# Cut learning rate (if necessary)
if llHistory[i] >= llHistory[bestIter] or np.isnan(llHistory[bestIter]):
bestIter = i
refIter = i
if i - refIter >= self.patience and enoughSteps:
stepTemp = self.step_cut * stepTemp
refIter = i
numCut += 1
if verbose is True:
print('Step size cut ' + str(numCut) + ' times')
if numCut >= self.max_cut:
break
# Report:
if verbose is True and (i+1)>=(self.conv_window*2) and (i+1) % self.conv_window == 0:
print('Iteration #: ' + str(i+1) + '; l1-norm change: ' + str(pctChange) + \
'; recovery time: ' + str(round(time.time()-t0, 2)) + ' seconds')
# average over last conv_window iterations' values
lamHat = np.mean(lamHistory[:, (i-self.conv_window+1):(i+1)], axis=1)
return lamHat, includeCheck, lamHistory, llHistory
def log_likelihood(self, p_agg):
r"""Compute log-likelihood and return the ~average (median/B).
Median used because of high variability of individual batch draws.
Outlier resistance important if using log-likelihood to inform convergence
Parameters
----------
p_agg: array_like
element-wise product of p(y|x) (an (S,B,) array) and
pqRatio (an (S,B) array or an (S,) array if sample_same=True)
Explicitly: p_agg for any element is p(y_b|x_s) * p(x_s|\lamHat) / Q(x_s)
where Q is the sampling function
Shape: (S, B,)
Returns
-------
ll: average log likelihood of p(y_b|\lambda)
"""
S, B = np.shape(p_agg)
likelihood = (1/S) * np.sum(p_agg, axis=0) # of all batch elements
ll = np.median(np.log(likelihood)) / B
return ll
def gradient(self, X_s, lamHat, p_agg):
"""
Compute MC gradients based on pre-computed measurement/sampling likelihoods
p(y|x), Q(x_s) (p_agg) and Poisson likelihoods (samples X_s, current estimate lamHat)
Parameters
----------
X_s : array_like
Sampled X's
Shape (N, S, B) or (N, S, 1)
lamHat : array_like
current estimate of lambda. Shape (N,)
p_agg : see log_likelihood()
Returns
-------
grad: array_like
batch gradient
Shape: (N,)
"""
_, _, sameSamples = np.shape(X_s) #same samples over each iteration
S, B = np.shape(p_agg)
grad = np.zeros((self.N,))
#Note - it's ok if grad = 0 if all sumChecks fail - equates to waiting
#until next iter
sums = np.sum(p_agg, axis=0)
sumCheck = sums !=0
if np.size(sumCheck) != 0: #else just return zero vector
if sameSamples == 1:
xOverL = X_s[:,:,0] / lamHat[:, None] #(N, S)
grad = np.sum((xOverL @ p_agg[:, sumCheck]) / sums[sumCheck] - 1 , axis=1)
else:
xOverL = X_s / lamHat[:, None, None] #(N, S, B)
numer = np.einsum('ij...,j...->i...', xOverL[:,:,sumCheck], p_agg[:,sumCheck])
grad = np.sum((numer / sums) - 1, axis=1)
grad = grad/B
return grad
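# Minimal usage sketch (hypothetical shapes; `my_fwdmodel` stands in for an
# instance of an mmv_models.FwdModel subclass, and Y is an (M, D) array):
#   N, S = 20, 50
#   sampler = PoissonSampler(lam=0.1 * np.ones(N))
#   spore = SPoRe(N, my_fwdmodel, sampler)
#   lamHat, includeCheck, lamHistory, llHistory = spore.recover(Y, S)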
class Sampler(ABC):
@abstractmethod
def sample(self, Y, S, seed=None):
"""Generate samples of X for each column of Y
Parameters
----------
Y : array_like
Observations to sample according to. This array must have
shape ``(M, B)``.
S : int
Number of samples to draw for each Y.
seed: Random seed for drawing
Returns
-------
X : (N, S, B) or (N, S, 1) ndarray
S Samples of X for each of B columns of Y. Last dimension is 1 if
same samples apply to all batch elements
"""
pass
@abstractmethod
def pq_ratio(self, X):
"""
Get the ratio of probability densities of input X
P(X|self._lam)/Q(X) element-wise
Where P(X|self._lam) is the Poisson probability of each entry in X
Q(X) is the sampler's probability of drawing that X
Parameters
----------
X : array_like
N-dimensional Vectors within range of Sampler.sample(), stacked in columns of array
Shape: ``(N, S, B)`` or ``(N, S, 1)``
Returns
-------
ratio : array_like
Probability densities Q(x) for all X
Shape: ``(S, B)``
"""
pass
class PoissonSampler(Sampler):
def __init__(self, lam, sample_same=True, seed=None):
"""
As used in [1]: Q(x) = P(x|lamHat)
Parameters
----------
lam : array_like (float)
Poisson rates from which to draw
Shape: ``(N,)``
sample_same : bool
Whether to use the same X samples for each column of Y.
"""
self._lam = lam
self._sample_same = sample_same
self._generator = np.random.default_rng(seed)
def sample(self, Y, S):
N, = self._lam.shape
_, B = Y.shape
if self._sample_same:
X = self._generator.poisson(self._lam[:, None, None], (N, S, 1))
else:
X = self._generator.poisson(self._lam[:, None, None], (N, S, B))
return X
def pq_ratio(self, X):
_, S, B = np.shape(X)
#With Poisson sampler - always sampling according to the current lambda value in the sampler
ratio = np.ones((S,B))
        return ratio
 | 2.34375 | 2
306/translate_cds.py | jsh/pybites | 0 | 11123 | """Use translation table to translate coding sequence to protein."""
from Bio.Data import CodonTable # type: ignore
from Bio.Seq import Seq # type: ignore
def translate_cds(cds: str, translation_table: str) -> str:
"""Translate coding sequence to protein.
:param cds: str: DNA coding sequence (CDS)
:param translation_table: str: translation table
        as defined in Bio.Data.CodonTable.ambiguous_dna_by_name
:return: str: Protein sequence
"""
table = CodonTable.ambiguous_dna_by_name[translation_table]
cds = "".join(cds.split()) # clean out whitespace
coding_dna = Seq(cds)
protein = coding_dna.translate(table, cds=True, to_stop=True)
return str(protein)
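if __name__ == "__main__":
    # Minimal usage sketch with a hypothetical but valid CDS: it starts with a
    # start codon, ends with a stop codon, and has no internal stop codons.
    example_cds = "ATG GCC ATT GTA ATG GGC CGC TGA"
    print(translate_cds(example_cds, "Standard"))  # expected output: MAIVMGR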
| 3.203125 | 3 |
example.py | n0emis/pycodimd | 1 | 11124 | <gh_stars>1-10
from pycodimd import CodiMD
cmd = CodiMD('https://md.noemis.me')
#cmd.login('<EMAIL>','CorrectHorseBatteryStaple')
cmd.load_cookies()
print(cmd.history()[-1]['text']) # Print Name of latest Note
| 1.890625 | 2 |
PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py | AngelLiang/PP4E | 0 | 11125 | <filename>PP4E-Examples-1.4/Examples/PP4E/System/Environment/echoenv.py
import os
print('echoenv...', end=' ')
print('Hello,', os.environ['USER'])
| 1.78125 | 2 |
plix/displays.py | freelan-developers/plix | 1 | 11126 | <reponame>freelan-developers/plix
"""
Display command results.
"""
from __future__ import unicode_literals
from contextlib import contextmanager
from argparse import Namespace
from io import BytesIO
from colorama import AnsiToWin32
from chromalog.stream import stream_has_color_support
from chromalog.colorizer import Colorizer
from chromalog.mark.helpers.simple import (
warning,
important,
success,
error,
)
class BaseDisplay(object):
"""
Provides general display logic to its subclasses.
"""
@contextmanager
def command(self, index, command):
"""
Contextmanager that wraps calls to :func:`start_command` and
:func:`stop_command`.
:param index: The index of the command.
        :param command: The command that is about to be executed, as a unicode
string.
"""
self.start_command(
index=index,
command=command,
)
result = Namespace(returncode=None)
try:
yield result
finally:
self.stop_command(
index=index,
command=command,
returncode=result.returncode,
)
class StreamDisplay(BaseDisplay):
"""
Displays commands output to an output stream.
"""
def __init__(self, stream, colorizer=Colorizer()):
"""
Initialize the :class:`StreamDisplay`.
:param stream: The stream to be attached too.
"""
super(StreamDisplay, self).__init__()
self.colorizer = colorizer
self.output_map = {}
if stream_has_color_support(stream):
self.stream = AnsiToWin32(stream).stream
else:
self.stream = stream
# Python 3 differentiates binary streams.
if hasattr(stream, 'buffer'):
self.binary_stream = stream.buffer
else:
self.binary_stream = stream
def format_output(self, message, *args, **kwargs):
"""
Format some output in regards to the output stream color-capability.
:param message: A message.
:returns: The formatted message.
"""
if stream_has_color_support(self.stream):
return self.colorizer.colorize_message(message, *args, **kwargs)
else:
return message.format(*args, **kwargs)
def set_context(self, commands):
"""
Set the context for display.
:param commands: The list of commands to be executed.
"""
self.longest_len = max(map(len, commands))
def start_command(self, index, command):
"""
        Indicate that a command started.
        :param index: The index of the command.
        :param command: The command that is about to be executed, as a unicode
string.
"""
self.stream.write(self.format_output(
"{}) {}",
warning(important(index + 1)),
command,
))
self.stream.flush()
self.output_map[index] = BytesIO()
def stop_command(self, index, command, returncode):
"""
Indicate that a command stopped.
:param index: The index of the command.
        :param command: The command that was executed, as a unicode string.
:param returncode: The exit status.
"""
self.stream.write(self.format_output(
"{}\t[{}]\n",
" " * (self.longest_len - len(command)),
success("success") if returncode == 0 else error("failed"),
))
if returncode != 0:
self.binary_stream.write(self.output_map[index].getvalue())
self.stream.write(self.format_output(
"{}) {} {}\n",
warning(important(index + 1)),
error("Command exited with"),
important(error(returncode)),
))
del self.output_map[index]
def command_output(self, index, data):
"""
Add some output for a command.
:param index: The index of the command.
:param data: The output data (as bytes).
"""
self.output_map[index].write(data)
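# Minimal usage sketch (hypothetical commands; assumes an ANSI-capable stdout):
#   import sys
#   display = StreamDisplay(sys.stdout)
#   display.set_context(['echo hello', 'false'])
#   with display.command(0, 'echo hello') as result:
#       display.command_output(0, b'hello\n')
#       result.returncode = 0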
| 2.5625 | 3 |
plugins/Autocomplete/plugin.py | mogad0n/Limnoria | 476 | 11127 | <reponame>mogad0n/Limnoria
###
# Copyright (c) 2020-2021, The Limnoria Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot import conf, ircutils, ircmsgs, callbacks
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization("Autocomplete")
REQUEST_TAG = "+draft/autocomplete-request"
RESPONSE_TAG = "+draft/autocomplete-response"
def _commonPrefix(L):
"""Takes a list of lists, and returns their longest common prefix."""
assert L
if len(L) == 1:
return L[0]
for n in range(1, max(map(len, L)) + 1):
prefix = L[0][:n]
for item in L[1:]:
if prefix != item[:n]:
return prefix[0:-1]
assert False
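# For example:
#   _commonPrefix([["a", "b", "c"], ["a", "b", "d"]]) == ["a", "b"]
#   _commonPrefix([["x"]]) == ["x"]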
def _getAutocompleteResponse(irc, msg, payload):
"""Returns the value of the +draft/autocomplete-response tag for the given
+draft/autocomplete-request payload."""
tokens = callbacks.tokenize(
payload, channel=msg.channel, network=irc.network
)
normalized_payload = " ".join(tokens)
candidate_commands = _getCandidates(irc, normalized_payload)
if len(candidate_commands) == 0:
# No result
return None
elif len(candidate_commands) == 1:
# One result, return it directly
commands = candidate_commands
else:
# Multiple results, return only the longest common prefix + one word
tokenized_candidates = [
callbacks.tokenize(c, channel=msg.channel, network=irc.network)
for c in candidate_commands
]
common_prefix = _commonPrefix(tokenized_candidates)
words_after_prefix = {
candidate[len(common_prefix)] for candidate in tokenized_candidates
}
commands = [
" ".join(common_prefix + [word]) for word in words_after_prefix
]
# strip what the user already typed
assert all(command.startswith(normalized_payload) for command in commands)
normalized_payload_length = len(normalized_payload)
response_items = [
command[normalized_payload_length:] for command in commands
]
return "\t".join(sorted(response_items))
def _getCandidates(irc, normalized_payload):
"""Returns a list of commands starting with the normalized_payload."""
candidates = set()
for cb in irc.callbacks:
cb_commands = cb.listCommands()
# copy them with the plugin name (optional when calling a command)
# at the beginning
plugin_name = cb.canonicalName()
cb_commands += [plugin_name + " " + command for command in cb_commands]
candidates |= {
command
for command in cb_commands
if command.startswith(normalized_payload)
}
return candidates
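# For example (assuming the stock Misc plugin is loaded, which provides a
# "help" command): _getCandidates(irc, "he") would include "help", and
# _getCandidates(irc, "misc he") would include "misc help".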
class Autocomplete(callbacks.Plugin):
"""Provides command completion for IRC clients that support it."""
def _enabled(self, irc, msg):
return (
conf.supybot.protocols.irc.experimentalExtensions()
and self.registryValue("enabled", msg.channel, irc.network)
)
def doTagmsg(self, irc, msg):
if REQUEST_TAG not in msg.server_tags:
return
if "msgid" not in msg.server_tags:
return
if not self._enabled(irc, msg):
return
msgid = msg.server_tags["msgid"]
text = msg.server_tags[REQUEST_TAG]
# using callbacks._addressed instead of callbacks.addressed, as
        # callbacks.addressed would tag the msg
payload = callbacks._addressed(irc, msg, payload=text)
if not payload:
# not addressed
return
# marks used by '_addressed' are usually prefixes (char, string,
# nick), but may also be suffixes (with
# supybot.reply.whenAddressedBy.nick.atEnd); but there is no way to
# have it in the middle of the message AFAIK.
assert payload in text
if not text.endswith(payload):
# If there is a suffix, it means the end of the text is used to
# address the bot, so it can't be a method to be completed.
return
autocomplete_response = _getAutocompleteResponse(irc, msg, payload)
if not autocomplete_response:
return
target = msg.channel or ircutils.nickFromHostmask(msg.prefix)
irc.queueMsg(
ircmsgs.IrcMsg(
server_tags={
"+draft/reply": msgid,
RESPONSE_TAG: autocomplete_response,
},
command="TAGMSG",
args=[target],
)
)
Class = Autocomplete
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 1.382813 | 1 |
utils/preprocess_twitter.py | arnavk/tumblr-emotions | 0 | 11128 | """
preprocess-twitter.py
python preprocess-twitter.py "Some random text with #hashtags, @mentions and http://t.co/kdjfkdjf (links). :)"
Script for preprocessing tweets by <NAME>
with small modifications by <NAME>
with translation to Python by <NAME>
Translation of Ruby script to create features for GloVe vectors for Twitter data.
http://nlp.stanford.edu/projects/glove/preprocess-twitter.rb
"""
import sys
import regex as re
FLAGS = re.MULTILINE | re.DOTALL
def hashtag(text):
text = text.group()
hashtag_body = text[1:]
if hashtag_body.isupper():
result = " {} ".format(hashtag_body.lower())
else:
result = " ".join(["<hashtag>"] + re.split(r"(?=[A-Z])", hashtag_body, flags=FLAGS))
return result
def allcaps(text):
text = text.group()
return text.lower() + " <allcaps>"
def tokenize(text):
# Different regex parts for smiley faces
eyes = r"[8:=;]"
nose = r"['`\-]?"
# function so code less repetitive
def re_sub(pattern, repl):
return re.sub(pattern, repl, text, flags=FLAGS)
text = re_sub(r"https?:\/\/\S+\b|www\.(\w+\.)+\S*", "<url>")
text = re_sub(r"@\w+", "<user>")
text = re_sub(r"{}{}[)dD]+|[)dD]+{}{}".format(eyes, nose, nose, eyes), "<smile>")
text = re_sub(r"{}{}p+".format(eyes, nose), "<lolface>")
text = re_sub(r"{}{}\(+|\)+{}{}".format(eyes, nose, nose, eyes), "<sadface>")
text = re_sub(r"{}{}[\/|l*]".format(eyes, nose), "<neutralface>")
text = re_sub(r"/"," / ")
text = re_sub(r"<3","<heart>")
text = re_sub(r"[-+]?[.\d]*[\d]+[:,.\d]*", "<number>")
text = re_sub(r"#\S+", hashtag)
text = re_sub(r"([!?.]){2,}", r"\1 <repeat>")
text = re_sub(r"\b(\S*?)(.)\2{2,}\b", r"\1\2 <elong>")
## -- I just don't understand why the Ruby script adds <allcaps> to everything so I limited the selection.
# text = re_sub(r"([^a-z0-9()<>'`\-]){2,}", allcaps)
text = re_sub(r"([A-Z]){2,}", allcaps)
return text.lower()
if __name__ == '__main__':
_, text = sys.argv
if text == "test":
text = "I TEST alllll kinds of #hashtags and #HASHTAGS, @mentions and 3000 (http://t.co/dkfjkdf). w/ <3 :) haha!!!!!"
tokens = tokenize(text)
print(tokens)
| 3.234375 | 3 |
pyPLANES/pw/pw_classes.py | matael/pyPLANES | 0 | 11129 | #! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_classes.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
from mediapack import from_yaml
from mediapack import Air, PEM, EqFluidJCA
from pyPLANES.utils.io import initialisation_out_files_plain
from pyPLANES.core.calculus import PwCalculus
from pyPLANES.core.multilayer import MultiLayer
from pyPLANES.pw.pw_layers import FluidLayer
from pyPLANES.pw.pw_interfaces import FluidFluidInterface, RigidBacking
Air = Air()
# def initialise_PW_solver(L, b):
# nb_PW = 0
# dofs = []
# for _layer in L:
# if _layer.medium.MODEL == "fluid":
# dofs.append(nb_PW+np.arange(2))
# nb_PW += 2
# elif _layer.medium.MODEL == "pem":
# dofs.append(nb_PW+np.arange(6))
# nb_PW += 6
# elif _layer.medium.MODEL == "elastic":
# dofs.append(nb_PW+np.arange(4))
# nb_PW += 4
# interface = []
# for i_l, _layer in enumerate(L[:-1]):
# interface.append((L[i_l].medium.MODEL, L[i_l+1].medium.MODEL))
# return nb_PW, interface, dofs
class PwProblem(PwCalculus, MultiLayer):
"""
Plane Wave Problem
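
    Illustrative usage sketch (added, not from the original file); the keyword
    names "ml" and "termination" are assumptions inferred from the commented-out
    Solver_PW further below:

        problem = PwProblem(ml=[("foam", 2.e-2)], termination="rigid")
        problem.update_frequency(1000.)
        problem.create_linear_system(1000.)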
"""
def __init__(self, **kwargs):
PwCalculus.__init__(self, **kwargs)
termination = kwargs.get("termination","rigid")
        self.method = kwargs.get("method", "global")
MultiLayer.__init__(self, **kwargs)
self.kx, self.ky, self.k = None, None, None
self.shift_plot = kwargs.get("shift_pw", 0.)
self.plot = kwargs.get("plot_results", [False]*6)
self.result = {}
self.outfiles_directory = False
if self.method == "global":
self.layers.insert(0,FluidLayer(Air,1.e-2))
if self.layers[1].medium.MEDIUM_TYPE == "fluid":
self.interfaces.append(FluidFluidInterface(self.layers[0],self.layers[1]))
self.nb_PW = 0
for _layer in self.layers:
if _layer.medium.MODEL == "fluid":
_layer.dofs = self.nb_PW+np.arange(2)
self.nb_PW += 2
elif _layer.medium.MODEL == "pem":
_layer.dofs = self.nb_PW+np.arange(6)
self.nb_PW += 6
elif _layer.medium.MODEL == "elastic":
_layer.dofs = self.nb_PW+np.arange(4)
self.nb_PW += 4
def update_frequency(self, f):
PwCalculus.update_frequency(self, f)
MultiLayer.update_frequency(self, f, self.k, self.kx)
def create_linear_system(self, f):
self.A = np.zeros((self.nb_PW-1, self.nb_PW), dtype=complex)
i_eq = 0
# Loop on the interfaces
for _int in self.interfaces:
if self.method == "global":
i_eq = _int.update_M_global(self.A, i_eq)
# for i_inter, _inter in enumerate(self.interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = self.interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_fluid_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_fluid_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = self.interface_pem_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_pem_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_pem_elastic(i_eq, i_inter, Layers, dofs, M)
# elif _inter[0] == "elastic":
# if _inter[1] == "fluid":
# i_eq = self.interface_elastic_fluid(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "pem":
# i_eq = self.interface_elastic_pem(i_eq, i_inter, Layers, dofs, M)
# if _inter[1] == "elastic":
# i_eq = self.interface_elastic_elastic(i_eq, i_inter, Layers, dofs, M)
# if self.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = self.interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1] )
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = self.interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif Layers[-1].medium.MODEL == "elastic":
# i_eq = self.interface_elastic_rigid(M, i_eq, Layers[-1], dofs[-1])
# elif self.backing == "transmission":
# i_eq = self.semi_infinite_medium(M, i_eq, Layers[-1], dofs[-1] )
        # The minus sign moves the incident (first) column to the right-hand side;
        # the exponential accounts for the phase shift across the added air layer.
        self.F = -self.A[:, 0]*np.exp(1j*self.ky*self.layers[0].d)
self.A = np.delete(self.A, 0, axis=1)
# print(self.A)
X = LA.solve(self.A, self.F)
# print(X)
# R_pyPLANES_PW = X[0]
# if self.backing == "transmission":
# T_pyPLANES_PW = X[-2]
# else:
# T_pyPLANES_PW = 0.
# X = np.delete(X, 0)
# del(dofs[0])
# for i, _ld in enumerate(dofs):
# dofs[i] -= 2
# if self.plot:
# self.plot_sol_PW(X, dofs)
# out["R"] = R_pyPLANES_PW
# out["T"] = T_pyPLANES_PW
# return out
# class Solver_PW(PwCalculus):
# def __init__(self, **kwargs):
# PwCalculus.__init__(self, **kwargs)
# ml = kwargs.get("ml")
# termination = kwargs.get("termination")
# self.layers = []
# for _l in ml:
# if _l[0] == "Air":
# mat = Air
# else:
# mat = from_yaml(_l[0]+".yaml")
# d = _l[1]
# self.layers.append(Layer(mat,d))
# if termination in ["trans", "transmission","Transmission"]:
# self.backing = "Transmission"
# else:
# self.backing = backing.rigid
# self.kx, self.ky, self.k = None, None, None
# self.shift_plot = kwargs.get("shift_pw", 0.)
# self.plot = kwargs.get("plot_results", [False]*6)
# self.result = {}
# self.outfiles_directory = False
# initialisation_out_files_plain(self)
# def write_out_files(self, out):
# self.out_file.write("{:.12e}\t".format(self.current_frequency))
# abs = 1-np.abs(out["R"])**2
# self.out_file.write("{:.12e}\t".format(abs))
# self.out_file.write("\n")
# def interface_fluid_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_rigid(self, M, ieq, L, d):
# SV, k_y = fluid_SV(self.kx, self.k, L.medium.K)
# M[ieq, d[0]] = SV[0, 0]*np.exp(-1j*k_y*L.thickness)
# M[ieq, d[1]] = SV[0, 1]
# ieq += 1
# return ieq
# def semi_infinite_medium(self, M, ieq, L, d):
# M[ieq, d[1]] = 1.
# ieq += 1
# return ieq
# def interface_pem_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium, self.kx)
# for _i in range(6):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+0][4]] = SV_1[_i, 4]
# M[ieq, d[iinter+0][5]] = SV_1[_i, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[_i, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[_i, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[4, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[4, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[4, 2]
# M[ieq, d[iinter+1][3]] = -SV_2[4, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = -SV_2[4, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = -SV_2[4, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+1][0]] = SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = SV_2[3, 2]
# M[ieq, d[iinter+1][3]] = SV_2[3, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[3, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[3, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_pem(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = PEM_SV(L[iinter+1].medium,self.kx)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[0, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[0, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[0, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = SV_2[1, 2]
# M[ieq, d[iinter+1][3]] = SV_2[1, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[1, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[1, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[2, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[2, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = (SV_2[3, 0]-SV_2[4, 0])
# M[ieq, d[iinter+1][1]] = (SV_2[3, 1]-SV_2[4, 1])
# M[ieq, d[iinter+1][2]] = (SV_2[3, 2]-SV_2[4, 2])
# M[ieq, d[iinter+1][3]] = (SV_2[3, 3]-SV_2[4, 3])*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = (SV_2[3, 4]-SV_2[4, 4])*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = (SV_2[3, 5]-SV_2[4, 5])*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[3, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[3, 3]
# M[ieq, d[iinter+1][0]] = SV_2[5, 0]
# M[ieq, d[iinter+1][1]] = SV_2[5, 1]
# M[ieq, d[iinter+1][2]] = SV_2[5, 2]
# M[ieq, d[iinter+1][3]] = SV_2[5, 3]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][4]] = SV_2[5, 4]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][5]] = SV_2[5, 5]*np.exp(-1j*k_y_2[2]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium,self.kx)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[1, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[1, 3]
# M[ieq, d[iinter+0][4]] = SV_1[1, 4]
# M[ieq, d[iinter+0][5]] = SV_1[1, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = (SV_1[3, 0]-SV_1[4, 0])*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = (SV_1[3, 1]-SV_1[4, 1])*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = (SV_1[3, 2]-SV_1[4, 2])*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = (SV_1[3, 3]-SV_1[4, 3])
# M[ieq, d[iinter+0][4]] = (SV_1[3, 4]-SV_1[4, 4])
# M[ieq, d[iinter+0][5]] = (SV_1[3, 5]-SV_1[4, 5])
# M[ieq, d[iinter+1][0]] = -SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[5, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[5, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[5, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[5, 3]
# M[ieq, d[iinter+0][4]] = SV_1[5, 4]
# M[ieq, d[iinter+0][5]] = SV_1[5, 5]
# M[ieq, d[iinter+1][0]] = -SV_2[3, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[3, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[3, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[3, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_elastic_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium,self.kx, self.omega)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium,self.kx, self.omega)
# for _i in range(4):
# M[ieq, d[iinter+0][0]] = SV_1[_i, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[_i, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[_i, 2]
# M[ieq, d[iinter+0][3]] = SV_1[_i, 3]
# M[ieq, d[iinter+1][0]] = -SV_2[_i, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[_i, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[_i, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[_i, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_fluid_elastic(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = fluid_SV(self.kx, self.k, L[iinter].medium.K)
# SV_2, k_y_2 = elastic_SV(L[iinter+1].medium, self.kx, self.omega)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]
# M[ieq, d[iinter+1][0]] = -SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = -SV_2[1, 1]
# M[ieq, d[iinter+1][2]] = -SV_2[1, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = -SV_2[1, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[1, 0]*np.exp(-1j*k_y_1*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[1, 1]
# M[ieq, d[iinter+1][0]] = SV_2[2, 0]
# M[ieq, d[iinter+1][1]] = SV_2[2, 1]
# M[ieq, d[iinter+1][2]] = SV_2[2, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[2, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]
# M[ieq, d[iinter+1][2]] = SV_2[0, 2]*np.exp(-1j*k_y_2[0]*L[iinter+1].thickness)
# M[ieq, d[iinter+1][3]] = SV_2[0, 3]*np.exp(-1j*k_y_2[1]*L[iinter+1].thickness)
# ieq += 1
# return ieq
# def interface_pem_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = PEM_SV(L[iinter].medium, self.kx)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # print(k_y_2)
# M[ieq, d[iinter+0][0]] = -SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[2, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[2, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[2, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[2, 5]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = -SV_1[4, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[4, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[4, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = -SV_1[4, 3]
# M[ieq, d[iinter+0][4]] = -SV_1[4, 4]
# M[ieq, d[iinter+0][5]] = -SV_1[4, 5]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# M[ieq, d[iinter+0][4]] = SV_1[0, 4]
# M[ieq, d[iinter+0][5]] = SV_1[0, 5]
# ieq += 1
# M[ieq, d[iinter+0][0]] = SV_1[3, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[3, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[3, 2]*np.exp(-1j*k_y_1[2]*L[iinter].thickness)
# M[ieq, d[iinter+0][3]] = SV_1[3, 3]
# M[ieq, d[iinter+0][4]] = SV_1[3, 4]
# M[ieq, d[iinter+0][5]] = SV_1[3, 5]
# ieq += 1
# return ieq
# def interface_elastic_fluid(self, ieq, iinter, L, d, M):
# SV_1, k_y_1 = elastic_SV(L[iinter].medium, self.kx, self.omega)
# SV_2, k_y_2 = fluid_SV(self.kx, self.k, L[iinter+1].medium.K)
# # Continuity of u_y
# M[ieq, d[iinter+0][0]] = -SV_1[1, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = -SV_1[1, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = -SV_1[1, 2]
# M[ieq, d[iinter+0][3]] = -SV_1[1, 3]
# M[ieq, d[iinter+1][0]] = SV_2[0, 0]
# M[ieq, d[iinter+1][1]] = SV_2[0, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_yy = -p
# M[ieq, d[iinter+0][0]] = SV_1[2, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[2, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[2, 2]
# M[ieq, d[iinter+0][3]] = SV_1[2, 3]
# M[ieq, d[iinter+1][0]] = SV_2[1, 0]
# M[ieq, d[iinter+1][1]] = SV_2[1, 1]*np.exp(-1j*k_y_2*L[iinter+1].thickness)
# ieq += 1
# # sigma_xy = 0
# M[ieq, d[iinter+0][0]] = SV_1[0, 0]*np.exp(-1j*k_y_1[0]*L[iinter].thickness)
# M[ieq, d[iinter+0][1]] = SV_1[0, 1]*np.exp(-1j*k_y_1[1]*L[iinter].thickness)
# M[ieq, d[iinter+0][2]] = SV_1[0, 2]
# M[ieq, d[iinter+0][3]] = SV_1[0, 3]
# ieq += 1
# return ieq
# def interface_elastic_rigid(self, M, ieq, L, d):
# SV, k_y = elastic_SV(L.medium,self.kx, self.omega)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]
# M[ieq, d[3]] = SV[1, 3]
# ieq += 1
# M[ieq, d[0]] = SV[3, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[3, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[3, 2]
# M[ieq, d[3]] = SV[3, 3]
# ieq += 1
# return ieq
# def interface_pem_rigid(self, M, ieq, L, d):
# SV, k_y = PEM_SV(L.medium, self.kx)
# M[ieq, d[0]] = SV[1, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[1, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[1, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[1, 3]
# M[ieq, d[4]] = SV[1, 4]
# M[ieq, d[5]] = SV[1, 5]
# ieq += 1
# M[ieq, d[0]] = SV[2, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[2, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[2, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[2, 3]
# M[ieq, d[4]] = SV[2, 4]
# M[ieq, d[5]] = SV[2, 5]
# ieq += 1
# M[ieq, d[0]] = SV[5, 0]*np.exp(-1j*k_y[0]*L.thickness)
# M[ieq, d[1]] = SV[5, 1]*np.exp(-1j*k_y[1]*L.thickness)
# M[ieq, d[2]] = SV[5, 2]*np.exp(-1j*k_y[2]*L.thickness)
# M[ieq, d[3]] = SV[5, 3]
# M[ieq, d[4]] = SV[5, 4]
# M[ieq, d[5]] = SV[5, 5]
# ieq += 1
# return ieq
# def plot_sol_PW(self, X, dofs):
# x_start = self.shift_plot
# for _l, _layer in enumerate(self.layers):
# x_f = np.linspace(0, _layer.thickness,200)
# x_b = x_f-_layer.thickness
# if _layer.medium.MODEL == "fluid":
# SV, k_y = fluid_SV(self.kx, self.k, _layer.medium.K)
# pr = SV[1, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# pr += SV[1, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# ut = SV[0, 0]*np.exp(-1j*k_y*x_f)*X[dofs[_l][0]]
# ut += SV[0, 1]*np.exp( 1j*k_y*x_b)*X[dofs[_l][1]]
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# # plt.figure(5)
# # plt.plot(x_start+x_f,np.abs(ut),'b')
# # plt.plot(x_start+x_f,np.imag(ut),'k')
# if _layer.medium.MODEL == "pem":
# SV, k_y = PEM_SV(_layer.medium, self.kx)
# ux, uy, pr, ut = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(3):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# uy += SV[5, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[5, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# pr += SV[4, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr += SV[4, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# ut += SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ut += SV[2, i_dim+3]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+3]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# if self.plot[2]:
# plt.figure(2)
# plt.plot(x_start+x_f, np.abs(pr), 'r')
# plt.plot(x_start+x_f, np.imag(pr), 'm')
# plt.title("Pressure")
# if _layer.medium.MODEL == "elastic":
# SV, k_y = elastic_SV(_layer.medium, self.kx, self.omega)
# ux, uy, pr, sig = 0*1j*x_f, 0*1j*x_f, 0*1j*x_f, 0*1j*x_f
# for i_dim in range(2):
# ux += SV[1, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# ux += SV[1, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# uy += SV[3, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# uy += SV[3, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# pr -= SV[2, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# pr -= SV[2, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# sig -= SV[0, i_dim ]*np.exp(-1j*k_y[i_dim]*x_f)*X[dofs[_l][i_dim]]
# sig -= SV[0, i_dim+2]*np.exp( 1j*k_y[i_dim]*x_b)*X[dofs[_l][i_dim+2]]
# if self.plot[0]:
# plt.figure(0)
# plt.plot(x_start+x_f, np.abs(uy), 'r')
# plt.plot(x_start+x_f, np.imag(uy), 'm')
# plt.title("Solid displacement along x")
# if self.plot[1]:
# plt.figure(1)
# plt.plot(x_start+x_f, np.abs(ux), 'r')
# plt.plot(x_start+x_f, np.imag(ux), 'm')
# plt.title("Solid displacement along y")
# # if self.plot[2]:
# # plt.figure(2)
# # plt.plot(x_start+x_f, np.abs(pr), 'r')
# # plt.plot(x_start+x_f, np.imag(pr), 'm')
# # plt.title("Sigma_yy")
# # if self.plot[2]:
# # plt.figure(3)
# # plt.plot(x_start+x_f, np.abs(sig), 'r')
# # plt.plot(x_start+x_f, np.imag(sig), 'm')
# # plt.title("Sigma_xy")
# x_start += _layer.thickness
# def PEM_SV(mat,ky):
# ''' S={0:\hat{\sigma}_{xy}, 1:u_y^s, 2:u_y^t, 3:\hat{\sigma}_{yy}, 4:p, 5:u_x^s}'''
# kx_1 = np.sqrt(mat.delta_1**2-ky**2)
# kx_2 = np.sqrt(mat.delta_2**2-ky**2)
# kx_3 = np.sqrt(mat.delta_3**2-ky**2)
# kx = np.array([kx_1, kx_2, kx_3])
# delta = np.array([mat.delta_1, mat.delta_2, mat.delta_3])
# alpha_1 = -1j*mat.A_hat*mat.delta_1**2-1j*2*mat.N*kx[0]**2
# alpha_2 = -1j*mat.A_hat*mat.delta_2**2-1j*2*mat.N*kx[1]**2
# alpha_3 = -2*1j*mat.N*kx[2]*ky
# SV = np.zeros((6,6), dtype=complex)
# SV[0:6, 0] = np.array([-2*1j*mat.N*kx[0]*ky, kx[0], mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 3] = np.array([ 2*1j*mat.N*kx[0]*ky,-kx[0],-mat.mu_1*kx[0], alpha_1, 1j*delta[0]**2*mat.K_eq_til*mat.mu_1, ky])
# SV[0:6, 1] = np.array([-2*1j*mat.N*kx[1]*ky, kx[1], mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 4] = np.array([ 2*1j*mat.N*kx[1]*ky,-kx[1],-mat.mu_2*kx[1],alpha_2, 1j*delta[1]**2*mat.K_eq_til*mat.mu_2, ky])
# SV[0:6, 2] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, alpha_3, 0., -kx[2]])
# SV[0:6, 5] = np.array([1j*mat.N*(kx[2]**2-ky**2), ky, mat.mu_3*ky, -alpha_3, 0., kx[2]])
# return SV, kx
# def elastic_SV(mat,ky, omega):
# ''' S={0:\sigma_{xy}, 1: u_y, 2 \sigma_{yy}, 3 u_x}'''
# P_mat = mat.lambda_ + 2.*mat.mu
# delta_p = omega*np.sqrt(mat.rho/P_mat)
# delta_s = omega*np.sqrt(mat.rho/mat.mu)
# kx_p = np.sqrt(delta_p**2-ky**2)
# kx_s = np.sqrt(delta_s**2-ky**2)
# kx = np.array([kx_p, kx_s])
# alpha_p = -1j*mat.lambda_*delta_p**2 - 2j*mat.mu*kx[0]**2
# alpha_s = 2j*mat.mu*kx[1]*ky
# SV = np.zeros((4, 4), dtype=np.complex)
# SV[0:4, 0] = np.array([-2.*1j*mat.mu*kx[0]*ky, kx[0], alpha_p, ky])
# SV[0:4, 2] = np.array([ 2.*1j*mat.mu*kx[0]*ky, -kx[0], alpha_p, ky])
# SV[0:4, 1] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky,-alpha_s, -kx[1]])
# SV[0:4, 3] = np.array([1j*mat.mu*(kx[1]**2-ky**2), ky, alpha_s, kx[1]])
# return SV, kx
# def fluid_SV(kx, k, K):
# ''' S={0:u_y , 1:p}'''
# ky = np.sqrt(k**2-kx**2)
# SV = np.zeros((2, 2), dtype=complex)
# SV[0, 0:2] = np.array([ky/(1j*K*k**2), -ky/(1j*K*k**2)])
# SV[1, 0:2] = np.array([1, 1])
# return SV, ky
# def resolution_PW_imposed_displacement(S, p):
# # print("k={}".format(p.k))
# Layers = S.layers.copy()
# n, interfaces, dofs = initialise_PW_solver(Layers, S.backing)
# M = np.zeros((n, n), dtype=complex)
# i_eq = 0
# # Loop on the layers
# for i_inter, _inter in enumerate(interfaces):
# if _inter[0] == "fluid":
# if _inter[1] == "fluid":
# i_eq = interface_fluid_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_fluid_pem(i_eq, i_inter, Layers, dofs, M, p)
# elif _inter[0] == "pem":
# if _inter[1] == "fluid":
# i_eq = interface_pem_fluid(i_eq, i_inter, Layers, dofs, M, p)
# if _inter[1] == "pem":
# i_eq = interface_pem_pem(i_eq, i_inter, Layers, dofs, M, p)
# if S.backing == backing.rigid:
# if Layers[-1].medium.MODEL == "fluid":
# i_eq = interface_fluid_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# elif Layers[-1].medium.MODEL == "pem":
# i_eq = interface_pem_rigid(M, i_eq, Layers[-1], dofs[-1], p)
# if Layers[0].medium.MODEL == "fluid":
# F = np.zeros(n, dtype=complex)
# SV, k_y = fluid_SV(p.kx, p.k, Layers[0].medium.K)
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]*np.exp(-1j*k_y*Layers[0].thickness)
# F[i_eq] = 1.
# elif Layers[0].medium.MODEL == "pem":
# SV, k_y = PEM_SV(Layers[0].medium, p.kx)
# M[i_eq, dofs[0][0]] = SV[2, 0]
# M[i_eq, dofs[0][1]] = SV[2, 1]
# M[i_eq, dofs[0][2]] = SV[2, 2]
# M[i_eq, dofs[0][3]] = SV[2, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[2, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[2, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# F = np.zeros(n, dtype=complex)
# F[i_eq] = 1.
# i_eq +=1
# M[i_eq, dofs[0][0]] = SV[0, 0]
# M[i_eq, dofs[0][1]] = SV[0, 1]
# M[i_eq, dofs[0][2]] = SV[0, 2]
# M[i_eq, dofs[0][3]] = SV[0, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[0, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[0, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# i_eq += 1
# M[i_eq, dofs[0][0]] = SV[3, 0]
# M[i_eq, dofs[0][1]] = SV[3, 1]
# M[i_eq, dofs[0][2]] = SV[3, 2]
# M[i_eq, dofs[0][3]] = SV[3, 3]*np.exp(-1j*k_y[0]*Layers[0].thickness)
# M[i_eq, dofs[0][4]] = SV[3, 4]*np.exp(-1j*k_y[1]*Layers[0].thickness)
# M[i_eq, dofs[0][5]] = SV[3, 5]*np.exp(-1j*k_y[2]*Layers[0].thickness)
# X = LA.solve(M, F)
# # print("|R pyPLANES_PW| = {}".format(np.abs(X[0])))
# print("R pyPLANES_PW = {}".format(X[0]))
# plot_sol_PW(S, X, dofs, p)
| 1.976563 | 2 |
mmtbx/conformation_dependent_library/mcl.py | pcxod/cctbx_project | 0 | 11130 | <reponame>pcxod/cctbx_project<filename>mmtbx/conformation_dependent_library/mcl.py
from __future__ import absolute_import, division, print_function
import sys
import time
from cctbx.array_family import flex
from scitbx.math import superpose
from mmtbx.conformation_dependent_library import mcl_sf4_coordination
from six.moves import range
from mmtbx.conformation_dependent_library import metal_coordination_library
def get_pdb_hierarchy_from_restraints(code):
from mmtbx.monomer_library import server
from iotbx import pdb
mon_lib_server = server.server()
path = mon_lib_server.get_comp_comp_id_direct(code, return_filename=True)
cif_obj = server.read_cif(path)
ligand_inp=pdb.pdb_input(source_info="Model from %s" % path,
lines=flex.split_lines(""))
ligand_hierarchy = ligand_inp.construct_hierarchy()
model=pdb.hierarchy.model()
chain=pdb.hierarchy.chain()
chain.id='Z'
rg=pdb.hierarchy.residue_group()
ag=pdb.hierarchy.atom_group()
for block, loops in cif_obj.blocks.items():
if block=='comp_list': continue
for loop in loops.iterloops():
for row in loop.iterrows():
if '_chem_comp_atom.comp_id' not in row: break
ag.resname = row['_chem_comp_atom.comp_id']
atom = pdb.hierarchy.atom()
atom.name = row['_chem_comp_atom.atom_id']
atom.element = '%2s' % row['_chem_comp_atom.type_symbol']
atom.xyz = (
float(row['_chem_comp_atom.x']),
float(row['_chem_comp_atom.y']),
float(row['_chem_comp_atom.z']),
)
ag.append_atom(atom)
rg.append_atom_group(ag)
chain.append_residue_group(rg)
model.append_chain(chain)
ligand_hierarchy.append_model(model)
ligand_hierarchy.atoms().reset_i_seq()
return ligand_hierarchy
def update(grm,
pdb_hierarchy,
link_records=None,
log=sys.stdout,
verbose=False,
):
def _atom_id(a, show_i_seq=False):
if show_i_seq:
return '%s (%5d)' % (a.id_str(), a.i_seq)
else:
return '%s' % (a.id_str())
if link_records is None: link_records={}
link_records.setdefault('LINK', [])
hooks = [
["Iron sulfur cluster coordination",
mcl_sf4_coordination.get_sulfur_iron_cluster_coordination,
mcl_sf4_coordination.get_all_proxies,
],
['Zn2+ tetrahedral coordination',
metal_coordination_library.get_metal_coordination_proxies,
metal_coordination_library.get_proxies,
],
]
outl = ''
outl_debug = ''
for label, get_coordination, get_all_proxies in hooks:
rc = get_coordination(
pdb_hierarchy=pdb_hierarchy,
nonbonded_proxies=grm.pair_proxies(
sites_cart=pdb_hierarchy.atoms().extract_xyz()).nonbonded_proxies,
verbose=verbose,
)
bproxies, aproxies = get_all_proxies(rc)
if bproxies is None: continue
if len(bproxies):
outl += ' %s\n' % label
      outl_debug += '  %s\n' % label
atoms = pdb_hierarchy.atoms()
sf4_coordination = {}
for bp in bproxies:
sf4_ag = atoms[bp.i_seqs[0]].parent()
sf4_coordination.setdefault(sf4_ag.id_str(), [])
sf4_coordination[sf4_ag.id_str()].append((atoms[bp.i_seqs[0]],
atoms[bp.i_seqs[1]]))
link = (atoms[bp.i_seqs[0]], atoms[bp.i_seqs[1]], 'x,y,z')
if link not in link_records: link_records['LINK'].append(link)
for sf4, aas in sorted(sf4_coordination.items()):
outl += '%spdb="%s"\n' % (' '*6, sf4)
outl_debug += '%spdb="%s"\n' % (' '*6, sf4)
for aa in sorted(aas):
outl += '%s%s - %s\n' % (' '*8, _atom_id(aa[0]), _atom_id(aa[1]))
outl_debug += '%s%s - %s\n' % (' '*8,
_atom_id(aa[0], True),
_atom_id(aa[1], True))
if bproxies:
try:
grm.add_new_bond_restraints_in_place(
proxies=bproxies,
sites_cart=pdb_hierarchy.atoms().extract_xyz(),
)
except RuntimeError as e:
print('\n\n%s' % outl_debug)
raise e
#
done = []
remove = []
for i, angle in enumerate(aproxies):
i_seqs = list(angle.i_seqs)
i_seqs.sort()
if i_seqs in done:
remove.append(i)
else:
done.append(i_seqs)
if remove:
remove.reverse()
for r in remove:
del aproxies[r]
#
if aproxies:
outl += '%s%s' % (' '*6, 'Number of angles added : %d\n' % len(aproxies))
grm.add_angles_in_place(aproxies)
if outl:
print(' Dynamic metal coordination', file=log)
print(outl, file=log)
def _extract_sites_cart(ag, element=None):
selection = []
for atom in ag.atoms():
if element and atom.element.upper().strip()!=element.upper().strip():
continue
selection.append(atom.xyz)
return flex.vec3_double(selection)
def generate_sites_fixed(pdb_hierarchy, resname, element=None):
for ag in pdb_hierarchy.atom_groups():
if ag.resname.strip().upper()==resname.upper():
yield _extract_sites_cart(ag, element), ag
def superpose_ideal_residue_coordinates(pdb_hierarchy,
resname,
superpose_element=None,
):
element_lookup = {'SF4' : 'Fe',
'F3S' : 'S',
#'F4S' : 'S', # not done yet
#'CLF' : 'Fe', # too flexible
'DVT' : 'V',
}
from mmtbx.monomer_library import pdb_interpretation
t0=time.time()
rmsd_list = {}
if superpose_element is None:
superpose_element = element_lookup.get(resname, None)
if resname in pdb_interpretation.ideal_ligands:
ideal_hierarchy = get_pdb_hierarchy_from_restraints(resname)
else:
assert 0
sites_moving = _extract_sites_cart(ideal_hierarchy, superpose_element)
assert len(sites_moving), 'No atoms %s found' % superpose_element
for ideal_ag in ideal_hierarchy.atom_groups(): break
for sites_fixed, ag in generate_sites_fixed(pdb_hierarchy,
resname,
superpose_element,
):
assert sites_fixed.size() == sites_moving.size(), '%(resname)s residue is missing atoms' % locals()
import random
min_rmsd = 1e9
min_sites_cart = None
for i in range(100):
random.shuffle(sites_moving)
lsq_fit = superpose.least_squares_fit(
reference_sites = sites_fixed,
other_sites = sites_moving)
new_atoms = ideal_ag.detached_copy().atoms()
sites_new = new_atoms.extract_xyz()
sites_new = lsq_fit.r.elems * sites_new + lsq_fit.t.elems
rmsd = sites_fixed.rms_difference(lsq_fit.other_sites_best_fit())
if rmsd<min_rmsd:
min_rmsd=rmsd
min_sites_cart = sites_new
rmsd_list[ag.id_str()] = min_rmsd
sites_new = min_sites_cart
new_atoms.set_xyz(sites_new)
for atom1 in ag.atoms():
for atom2 in new_atoms:
if atom1.name.strip()==atom2.name.strip():
atom1.xyz=atom2.xyz
break
else:
assert 0, 'not all atoms updated - missing %s' % atom1.quote()
outl = ''
if rmsd_list:
outl = '\n %(resname)s Regularisation' % locals()
outl+= '\n residue rmsd'
for id_str, rmsd in sorted(rmsd_list.items()):
outl += '\n "%s" %0.1f' % (id_str, rmsd)
outl += '\n Time to superpose : %0.2fs\n' % (time.time()-t0)
return outl
def superpose_ideal_ligand_on_poor_ligand(ideal_hierarchy,
poor_hierarchy,
):
"""Function superpose an ideal ligand onto the mangled ligand from a
ligand fitting procedure
Args:
ideal_hierarchy (pdb_hierarchy): Ideal ligand
poor_hierarchy (pdb_hierarchy): Poor ligand with correct c.o.m. and same
atom names in order. Could become more sophisticated.
"""
sites_moving = flex.vec3_double()
sites_fixed = flex.vec3_double()
for atom1, atom2 in zip(ideal_hierarchy.atoms(), poor_hierarchy.atoms()):
assert atom1.name==atom2.name, '%s!=%s' % (atom1.quote(),atom2.quote())
sites_moving.append(atom1.xyz)
sites_fixed.append(atom2.xyz)
lsq_fit = superpose.least_squares_fit(
reference_sites = sites_fixed,
other_sites = sites_moving)
sites_new = ideal_hierarchy.atoms().extract_xyz()
sites_new = lsq_fit.r.elems * sites_new + lsq_fit.t.elems
# rmsd = sites_fixed.rms_difference(lsq_fit.other_sites_best_fit())
ideal_hierarchy.atoms().set_xyz(sites_new)
return ideal_hierarchy
if __name__=="__main__":
from iotbx import pdb
ideal_inp=pdb.pdb_input(sys.argv[1])
ideal_hierarchy = ideal_inp.construct_hierarchy()
poor_inp=pdb.pdb_input(sys.argv[2])
poor_hierarchy = poor_inp.construct_hierarchy()
ideal_hierarchy = superpose_ideal_ligand_on_poor_ligand(ideal_hierarchy, poor_hierarchy)
ideal_hierarchy.write_pdb_file('new.pdb')
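# Illustrative invocation (assumption, not from the original file):
#   python mcl.py ideal_ligand.pdb poorly_fitted_ligand.pdb
# The superposed ideal ligand is written to new.pdb.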
| 1.9375 | 2 |
tests/test_install.py | dfroger/conda | 0 | 11131 | <gh_stars>0
from contextlib import contextmanager
import random
import shutil
import stat
import tempfile
import unittest
from os.path import join
from conda import install
from conda.install import (PaddingError, binary_replace, update_prefix,
warn_failed_remove, duplicates_to_remove)
from .decorators import skip_if_no_mock
from .helpers import mock
patch = mock.patch if mock else None
def generate_random_path():
return '/some/path/to/file%s' % random.randint(100, 200)
class TestBinaryReplace(unittest.TestCase):
def test_simple(self):
self.assertEqual(
binary_replace(b'xxxaaaaaxyz\x00zz', b'aaaaa', b'bbbbb'),
b'xxxbbbbbxyz\x00zz')
def test_shorter(self):
self.assertEqual(
binary_replace(b'xxxaaaaaxyz\x00zz', b'aaaaa', b'bbbb'),
b'xxxbbbbxyz\x00\x00zz')
def test_too_long(self):
self.assertRaises(PaddingError, binary_replace,
b'xxxaaaaaxyz\x00zz', b'aaaaa', b'bbbbbbbb')
def test_no_extra(self):
self.assertEqual(binary_replace(b'aaaaa\x00', b'aaaaa', b'bbbbb'),
b'bbbbb\x00')
def test_two(self):
self.assertEqual(
binary_replace(b'aaaaa\x001234aaaaacc\x00\x00', b'aaaaa',
b'bbbbb'),
b'bbbbb\x001234bbbbbcc\x00\x00')
def test_spaces(self):
self.assertEqual(
binary_replace(b' aaaa \x00', b'aaaa', b'bbbb'),
b' bbbb \x00')
def test_multiple(self):
self.assertEqual(
binary_replace(b'aaaacaaaa\x00', b'aaaa', b'bbbb'),
b'bbbbcbbbb\x00')
self.assertEqual(
binary_replace(b'aaaacaaaa\x00', b'aaaa', b'bbb'),
b'bbbcbbb\x00\x00\x00')
self.assertRaises(PaddingError, binary_replace,
b'aaaacaaaa\x00', b'aaaa', b'bbbbb')
class FileTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tmpfname = join(self.tmpdir, 'testfile')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_default_text(self):
with open(self.tmpfname, 'w') as fo:
fo.write('#!/opt/anaconda1anaconda2anaconda3/bin/python\n'
'echo "Hello"\n')
update_prefix(self.tmpfname, '/usr/local')
with open(self.tmpfname, 'r') as fi:
data = fi.read()
self.assertEqual(data, '#!/usr/local/bin/python\n'
'echo "Hello"\n')
def test_binary(self):
with open(self.tmpfname, 'wb') as fo:
fo.write(b'\x7fELF.../some-placeholder/lib/libfoo.so\0')
update_prefix(self.tmpfname, '/usr/local',
placeholder='/some-placeholder', mode='binary')
with open(self.tmpfname, 'rb') as fi:
data = fi.read()
self.assertEqual(
data,
b'\x7fELF.../usr/local/lib/libfoo.so\0\0\0\0\0\0\0\0'
)
class remove_readonly_TestCase(unittest.TestCase):
def test_takes_three_args(self):
with self.assertRaises(TypeError):
install._remove_readonly()
with self.assertRaises(TypeError):
install._remove_readonly(True)
with self.assertRaises(TypeError):
install._remove_readonly(True, True)
with self.assertRaises(TypeError):
install._remove_readonly(True, True, True, True)
@skip_if_no_mock
def test_calls_os_chmod(self):
some_path = generate_random_path()
with patch.object(install.os, 'chmod') as chmod:
install._remove_readonly(mock.Mock(), some_path, {})
chmod.assert_called_with(some_path, stat.S_IWRITE)
@skip_if_no_mock
def test_calls_func(self):
some_path = generate_random_path()
func = mock.Mock()
with patch.object(install.os, 'chmod'):
install._remove_readonly(func, some_path, {})
func.assert_called_with(some_path)
class rm_rf_file_and_link_TestCase(unittest.TestCase):
@contextmanager
def generate_mock_islink(self, value):
with patch.object(install, 'islink', return_value=value) as islink:
yield islink
@contextmanager
def generate_mock_isdir(self, value):
with patch.object(install, 'isdir', return_value=value) as isdir:
yield isdir
@contextmanager
def generate_mock_isfile(self, value):
with patch.object(install, 'isfile', return_value=value) as isfile:
yield isfile
@contextmanager
def generate_mock_os_access(self, value):
with patch.object(install.os, 'access', return_value=value) as os_access:
yield os_access
@contextmanager
def generate_mock_unlink(self):
with patch.object(install.os, 'unlink') as unlink:
yield unlink
@contextmanager
def generate_mock_rmtree(self):
with patch.object(install.shutil, 'rmtree') as rmtree:
yield rmtree
@contextmanager
def generate_mock_sleep(self):
with patch.object(install.time, 'sleep') as sleep:
yield sleep
@contextmanager
def generate_mock_log(self):
with patch.object(install, 'log') as log:
yield log
@contextmanager
def generate_mock_on_win(self, value):
original = install.on_win
install.on_win = value
yield
install.on_win = original
@contextmanager
def generate_mock_check_call(self):
with patch.object(install.subprocess, 'check_call') as check_call:
yield check_call
@contextmanager
def generate_mocks(self, islink=True, isfile=True, isdir=True, on_win=False, os_access=True):
with self.generate_mock_islink(islink) as mock_islink:
with self.generate_mock_isfile(isfile) as mock_isfile:
with self.generate_mock_os_access(os_access) as mock_os_access:
with self.generate_mock_isdir(isdir) as mock_isdir:
with self.generate_mock_unlink() as mock_unlink:
with self.generate_mock_rmtree() as mock_rmtree:
with self.generate_mock_sleep() as mock_sleep:
with self.generate_mock_log() as mock_log:
with self.generate_mock_on_win(on_win):
with self.generate_mock_check_call() as check_call:
yield {
'islink': mock_islink,
'isfile': mock_isfile,
'isdir': mock_isdir,
'os_access': mock_os_access,
'unlink': mock_unlink,
'rmtree': mock_rmtree,
'sleep': mock_sleep,
'log': mock_log,
'check_call': check_call,
}
def generate_directory_mocks(self, on_win=False):
return self.generate_mocks(islink=False, isfile=False, isdir=True,
on_win=on_win)
def generate_all_false_mocks(self):
return self.generate_mocks(False, False, False)
@property
def generate_random_path(self):
return generate_random_path()
@skip_if_no_mock
def test_calls_islink(self):
with self.generate_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['islink'].assert_called_with(some_path)
@skip_if_no_mock
def test_calls_unlink_on_true_islink(self):
with self.generate_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['unlink'].assert_called_with(some_path)
@skip_if_no_mock
def test_calls_unlink_on_os_access_false(self):
with self.generate_mocks(os_access=False) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['unlink'].assert_called_with(some_path)
@skip_if_no_mock
def test_does_not_call_isfile_if_islink_is_true(self):
with self.generate_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertFalse(mocks['isfile'].called)
@skip_if_no_mock
def test_calls_isfile_with_path(self):
with self.generate_mocks(islink=False, isfile=True) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['isfile'].assert_called_with(some_path)
@skip_if_no_mock
def test_calls_unlink_on_false_islink_and_true_isfile(self):
with self.generate_mocks(islink=False, isfile=True) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['unlink'].assert_called_with(some_path)
@skip_if_no_mock
def test_does_not_call_unlink_on_false_values(self):
with self.generate_mocks(islink=False, isfile=False) as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertFalse(mocks['unlink'].called)
@skip_if_no_mock
def test_does_not_call_shutil_on_false_isdir(self):
with self.generate_all_false_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertFalse(mocks['rmtree'].called)
@skip_if_no_mock
def test_calls_rmtree_at_least_once_on_isdir_true(self):
with self.generate_directory_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
mocks['rmtree'].assert_called_with(
some_path, onerror=warn_failed_remove, ignore_errors=False)
@skip_if_no_mock
def test_calls_rmtree_only_once_on_success(self):
with self.generate_directory_mocks() as mocks:
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertEqual(1, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_raises_final_exception_if_it_cant_remove(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
some_path = self.generate_random_path
with self.assertRaises(OSError):
install.rm_rf(some_path)
@skip_if_no_mock
def test_retries_six_times_to_ensure_it_cant_really_remove(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
some_path = self.generate_random_path
with self.assertRaises(OSError):
install.rm_rf(some_path)
self.assertEqual(6, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_retries_as_many_as_max_retries_plus_one(self):
max_retries = random.randint(7, 10)
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
some_path = self.generate_random_path
with self.assertRaises(OSError):
install.rm_rf(some_path, max_retries=max_retries)
self.assertEqual(max_retries + 1, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_stops_retrying_after_success(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = [OSError, OSError, None]
some_path = self.generate_random_path
install.rm_rf(some_path)
self.assertEqual(3, mocks['rmtree'].call_count)
@skip_if_no_mock
def test_pauses_for_same_number_of_seconds_as_max_retries(self):
with self.generate_directory_mocks() as mocks:
mocks['rmtree'].side_effect = OSError
max_retries = random.randint(1, 10)
with self.assertRaises(OSError):
install.rm_rf(self.generate_random_path,
max_retries=max_retries)
expected = [mock.call(i) for i in range(max_retries)]
mocks['sleep'].assert_has_calls(expected)
@skip_if_no_mock
def test_logs_messages_generated_for_each_retry(self):
with self.generate_directory_mocks() as mocks:
random_path = self.generate_random_path
mocks['rmtree'].side_effect = OSError(random_path)
max_retries = random.randint(1, 10)
with self.assertRaises(OSError):
install.rm_rf(random_path, max_retries=max_retries)
log_template = "\n".join([
"Unable to delete %s" % random_path,
"%s" % OSError(random_path),
"Retrying after %d seconds...",
])
expected_call_list = [mock.call(log_template % i)
for i in range(max_retries)]
mocks['log'].debug.assert_has_calls(expected_call_list)
@skip_if_no_mock
def test_tries_extra_kwarg_on_windows(self):
with self.generate_directory_mocks(on_win=True) as mocks:
random_path = self.generate_random_path
mocks['rmtree'].side_effect = [OSError, None]
install.rm_rf(random_path)
expected_call_list = [
mock.call(random_path, ignore_errors=False, onerror=warn_failed_remove),
mock.call(random_path, onerror=install._remove_readonly)
]
mocks['rmtree'].assert_has_calls(expected_call_list)
self.assertEqual(2, mocks['rmtree'].call_count)
class duplicates_to_remove_TestCase(unittest.TestCase):
def test_1(self):
linked = ['conda-3.18.8-py27_0', 'conda-3.19.0',
'python-2.7.10-2', 'python-2.7.11-0',
'zlib-1.2.8-0']
keep = ['conda-3.19.0', 'python-2.7.11-0']
self.assertEqual(duplicates_to_remove(linked, keep),
['conda-3.18.8-py27_0', 'python-2.7.10-2'])
def test_2(self):
linked = ['conda-3.19.0',
'python-2.7.10-2', 'python-2.7.11-0',
'zlib-1.2.7-1', 'zlib-1.2.8-0', 'zlib-1.2.8-4']
keep = ['conda-3.19.0', 'python-2.7.11-0']
self.assertEqual(duplicates_to_remove(linked, keep),
['python-2.7.10-2', 'zlib-1.2.7-1', 'zlib-1.2.8-0'])
def test_3(self):
linked = ['python-2.7.10-2', 'python-2.7.11-0', 'python-3.4.3-1']
keep = ['conda-3.19.0', 'python-2.7.11-0']
self.assertEqual(duplicates_to_remove(linked, keep),
['python-2.7.10-2', 'python-3.4.3-1'])
def test_nokeep(self):
linked = ['python-2.7.10-2', 'python-2.7.11-0', 'python-3.4.3-1']
self.assertEqual(duplicates_to_remove(linked, []),
['python-2.7.10-2', 'python-2.7.11-0'])
def test_misc(self):
d1 = 'a-1.3-0'
self.assertEqual(duplicates_to_remove([], []), [])
self.assertEqual(duplicates_to_remove([], [d1]), [])
self.assertEqual(duplicates_to_remove([d1], [d1]), [])
self.assertEqual(duplicates_to_remove([d1], []), [])
d2 = 'a-1.4-0'
li = set([d1, d2])
self.assertEqual(duplicates_to_remove(li, [d2]), [d1])
self.assertEqual(duplicates_to_remove(li, [d1]), [d2])
self.assertEqual(duplicates_to_remove(li, []), [d1])
self.assertEqual(duplicates_to_remove(li, [d1, d2]), [])
if __name__ == '__main__':
unittest.main()
| 2.296875 | 2 |
django_elastic_appsearch/slicer.py | CorrosiveKid/django_elastic_appsearch | 11 | 11132 | <filename>django_elastic_appsearch/slicer.py<gh_stars>10-100
"""A Queryset slicer for Django."""
def slice_queryset(queryset, chunk_size):
"""Slice a queryset into chunks."""
start_pk = 0
queryset = queryset.order_by('pk')
while True:
# No entry left
if not queryset.filter(pk__gt=start_pk).exists():
break
try:
# Fetch chunk_size entries if possible
end_pk = queryset.filter(pk__gt=start_pk).values_list(
'pk', flat=True)[chunk_size - 1]
# Fetch rest entries if less than chunk_size left
except IndexError:
end_pk = queryset.values_list('pk', flat=True).last()
yield queryset.filter(pk__gt=start_pk).filter(pk__lte=end_pk)
start_pk = end_pk
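# Illustrative usage sketch (not part of the original module); `Article` and
# `index_chunk` are hypothetical stand-ins for a Django model and a consumer:
#   for chunk in slice_queryset(Article.objects.all(), chunk_size=500):
#       index_chunk(chunk)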
| 2.734375 | 3 |
newsite/news/urls.py | JasperStfun/Django_C | 0 | 11133 | <filename>newsite/news/urls.py<gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('', views.news_home, name='news_home'),
path('create', views.create, name='create'),
path('<int:pk>', views.NewsDetailView.as_view(), name='news-detail'),
path('<int:pk>/update', views.NewsUpdateView.as_view(), name='news-update'),
path('<int:pk>/delete', views.NewsDeleteView.as_view(), name='news-delete'),
] | 1.835938 | 2 |
module/phase_one/headers.py | cqr-cryeye-forks/Florid | 7 | 11134 | <reponame>cqr-cryeye-forks/Florid<filename>module/phase_one/headers.py
import requests
import lib.common
MODULE_NAME = 'headers'
def run():
r = requests.get(lib.common.SOURCE_URL)
# X-Forwarded-By:
if 'X-Powered-By' in r.headers:
lib.common.RESULT_ONE_DICT['X-Powered-By'] = r.headers['X-Powered-By']
# Server:
if 'Server' in r.headers:
lib.common.RESULT_ONE_DICT['Server'] = r.headers['Server']
lib.common.ALIVE_LINE[MODULE_NAME] += 1
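# Illustrative outcome (assumption): after run(), lib.common.RESULT_ONE_DICT may
# hold e.g. {'X-Powered-By': 'PHP/7.4.3', 'Server': 'nginx'} when the target
# exposes those headers.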
| 2.125 | 2 |
ekorpkit/io/fetch/edgar/edgar.py | entelecheia/ekorpkit | 4 | 11135 | import os
import requests
from bs4 import BeautifulSoup
from ekorpkit import eKonf
from ekorpkit.io.download.web import web_download, web_download_unzip
class EDGAR:
def __init__(self, **args):
self.args = eKonf.to_config(args)
self.base_url = self.args.base_url
self.url = self.args.url
self.output_dir = self.args.output_dir
os.makedirs(self.output_dir, exist_ok=True)
self.force_download = self.args.force_download
self.name = self.args.name
self.build()
def build(self):
if self.force_download or not os.listdir(self.output_dir):
self.download_edgar()
else:
print(f"{self.name} is already downloaded")
def download_edgar(self):
user_agent = "Mozilla/5.0"
headers = {"User-Agent": user_agent}
page = requests.get(self.url, headers=headers)
soup = BeautifulSoup(page.content, "html.parser")
filelist = soup.find_all("a", class_="filename")
for file in filelist:
link = self.base_url + file.get("href")
file_path = self.output_dir + "/" + file.get_text().strip()
web_download(link, file_path, self.name, self.force_download)
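# Illustrative usage sketch (not part of the original module); the URL values
# and output directory below are assumptions:
#   crawler = EDGAR(
#       name="edgar-filings",
#       base_url="https://www.sec.gov",
#       url="https://www.sec.gov/Archives/edgar/full-index/",
#       output_dir="./data/edgar",
#       force_download=False,
#   )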
| 2.875 | 3 |
HARK/ConsumptionSaving/tests/test_PerfForesightConsumerType.py | michiboo/HARK | 0 | 11136 | from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType
import numpy as np
import unittest
class testPerfForesightConsumerType(unittest.TestCase):
def setUp(self):
self.agent = PerfForesightConsumerType()
self.agent_infinite = PerfForesightConsumerType(cycles=0)
PF_dictionary = {
'CRRA' : 2.5,
'DiscFac' : 0.96,
'Rfree' : 1.03,
'LivPrb' : [0.98],
'PermGroFac' : [1.01],
'T_cycle' : 1,
'cycles' : 0,
'AgentCount' : 10000
}
self.agent_alt = PerfForesightConsumerType(
**PF_dictionary)
def test_default_solution(self):
self.agent.solve()
c = self.agent.solution[0].cFunc
self.assertEqual(c.x_list[0], -0.9805825242718447)
self.assertEqual(c.x_list[1], 0.01941747572815533)
self.assertEqual(c.y_list[0], 0)
self.assertEqual(c.y_list[1], 0.511321002804608)
self.assertEqual(c.decay_extrap, False)
def test_another_solution(self):
self.agent_alt.DiscFac = 0.90
self.agent_alt.solve()
self.assertAlmostEqual(
self.agent_alt.solution[0].cFunc(10).tolist(),
3.9750093524820787)
def test_checkConditions(self):
self.agent_infinite.checkConditions()
self.assertTrue(self.agent_infinite.AIC)
self.assertTrue(self.agent_infinite.GICPF)
self.assertTrue(self.agent_infinite.RIC)
self.assertTrue(self.agent_infinite.FHWC)
def test_simulation(self):
self.agent_infinite.solve()
# Create parameter values necessary for simulation
SimulationParams = {
"AgentCount" : 10000, # Number of agents of this type
"T_sim" : 120, # Number of periods to simulate
"aNrmInitMean" : -6.0, # Mean of log initial assets
"aNrmInitStd" : 1.0, # Standard deviation of log initial assets
"pLvlInitMean" : 0.0, # Mean of log initial permanent income
"pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
"PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
"T_age" : None, # Age after which simulated agents are automatically killed
}
self.agent_infinite(**SimulationParams) # This implicitly uses the assignParameters method of AgentType
# Create PFexample object
self.agent_infinite.track_vars = ['mNrmNow']
self.agent_infinite.initializeSim()
self.agent_infinite.simulate()
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[40],
-23.008063500363942
)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[100],
-27.164608851546927
)
## Try now with the manipulation at time step 80
self.agent_infinite.initializeSim()
self.agent_infinite.simulate(80)
self.agent_infinite.aNrmNow += -5. # Adjust all simulated consumers' assets downward by 5
self.agent_infinite.simulate(40)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[40],
-23.008063500363942
)
self.assertAlmostEqual(
np.mean(self.agent_infinite.mNrmNow_hist,axis=1)[100],
-29.140261331951606
)
| 2.296875 | 2 |
src-gen/openapi_server/models/config.py | etherisc/bima-bolt-api | 0 | 11137 | <filename>src-gen/openapi_server/models/config.py
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.component import Component
from openapi_server import util
from openapi_server.models.component import Component # noqa: E501
class Config(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, mongo=None, s3=None, arc2=None, created_at=None): # noqa: E501
"""Config - a model defined in OpenAPI
:param mongo: The mongo of this Config. # noqa: E501
:type mongo: Component
:param s3: The s3 of this Config. # noqa: E501
:type s3: Component
:param arc2: The arc2 of this Config. # noqa: E501
:type arc2: Component
:param created_at: The created_at of this Config. # noqa: E501
:type created_at: datetime
"""
self.openapi_types = {
'mongo': Component,
's3': Component,
'arc2': Component,
'created_at': datetime
}
self.attribute_map = {
'mongo': 'mongo',
's3': 's3',
'arc2': 'arc2',
'created_at': 'created_at'
}
self._mongo = mongo
self._s3 = s3
self._arc2 = arc2
self._created_at = created_at
@classmethod
def from_dict(cls, dikt) -> 'Config':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Config of this Config. # noqa: E501
:rtype: Config
"""
return util.deserialize_model(dikt, cls)
@property
def mongo(self):
"""Gets the mongo of this Config.
:return: The mongo of this Config.
:rtype: Component
"""
return self._mongo
@mongo.setter
def mongo(self, mongo):
"""Sets the mongo of this Config.
:param mongo: The mongo of this Config.
:type mongo: Component
"""
if mongo is None:
raise ValueError("Invalid value for `mongo`, must not be `None`") # noqa: E501
self._mongo = mongo
@property
def s3(self):
"""Gets the s3 of this Config.
:return: The s3 of this Config.
:rtype: Component
"""
return self._s3
@s3.setter
def s3(self, s3):
"""Sets the s3 of this Config.
:param s3: The s3 of this Config.
:type s3: Component
"""
if s3 is None:
raise ValueError("Invalid value for `s3`, must not be `None`") # noqa: E501
self._s3 = s3
@property
def arc2(self):
"""Gets the arc2 of this Config.
:return: The arc2 of this Config.
:rtype: Component
"""
return self._arc2
@arc2.setter
def arc2(self, arc2):
"""Sets the arc2 of this Config.
:param arc2: The arc2 of this Config.
:type arc2: Component
"""
if arc2 is None:
raise ValueError("Invalid value for `arc2`, must not be `None`") # noqa: E501
self._arc2 = arc2
@property
def created_at(self):
"""Gets the created_at of this Config.
Creation timestamp, omit this property for post requests # noqa: E501
:return: The created_at of this Config.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Config.
Creation timestamp, omit this property for post requests # noqa: E501
:param created_at: The created_at of this Config.
:type created_at: datetime
"""
self._created_at = created_at
| 2.234375 | 2 |
txt_annotation.py | bubbliiiing/classification-keras | 30 | 11138 | import os
from os import getcwd
#---------------------------------------------#
#   Be sure to edit classes before training
#   The class order must match the txt under model_data
#---------------------------------------------#
classes = ["cat", "dog"]
sets = ["train", "test"]
wd = getcwd()
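# Each output line has the form "<class id>;<absolute image path>",
# e.g. "0;<working dir>/datasets/train/cat/xxx.jpg" (illustrative path).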
for se in sets:
list_file = open('cls_' + se + '.txt', 'w')
datasets_path = "datasets/" + se
types_name = os.listdir(datasets_path)
for type_name in types_name:
if type_name not in classes:
continue
cls_id = classes.index(type_name)
photos_path = os.path.join(datasets_path, type_name)
photos_name = os.listdir(photos_path)
for photo_name in photos_name:
_, postfix = os.path.splitext(photo_name)
if postfix not in ['.jpg', '.png', '.jpeg']:
continue
list_file.write(str(cls_id) + ";" + '%s/%s'%(wd, os.path.join(photos_path, photo_name)))
list_file.write('\n')
list_file.close()
| 2.6875 | 3 |
S4/S4 Library/simulation/relationships/sim_knowledge.py | NeonOcean/Environment | 1 | 11139 | <filename>S4/S4 Library/simulation/relationships/sim_knowledge.py
from protocolbuffers import SimObjectAttributes_pb2 as protocols
from careers.career_unemployment import CareerUnemployment
import services
import sims4
logger = sims4.log.Logger('Relationship', default_owner='jjacobson')
class SimKnowledge:
__slots__ = ('_rel_data', '_known_traits', '_knows_career', '_known_stats', '_knows_major')
def __init__(self, rel_data):
self._rel_data = rel_data
self._known_traits = None
self._knows_career = False
self._known_stats = None
self._knows_major = False
def add_known_trait(self, trait, notify_client=True):
if trait.is_personality_trait:
if self._known_traits is None:
self._known_traits = set()
self._known_traits.add(trait)
if notify_client:
self._rel_data.relationship.send_relationship_info()
else:
            logger.error("Tried to add non-personality trait {} to Sim {}'s knowledge about Sim {}", trait, self._rel_data.sim_id, self._rel_data.target_sim_id)
@property
def known_traits(self):
if self._known_traits is None:
return ()
return self._known_traits
@property
def knows_career(self):
return self._knows_career
def add_knows_career(self, notify_client=True):
self._knows_career = True
if notify_client:
self._rel_data.relationship.send_relationship_info()
def remove_knows_career(self, notify_client=True):
self._knows_career = False
if notify_client:
self._rel_data.relationship.send_relationship_info()
def get_known_careers(self):
if self._knows_career:
target_sim_info = self._rel_data.find_target_sim_info()
if target_sim_info is not None:
if target_sim_info.career_tracker.has_career:
careers = tuple(career for career in target_sim_info.careers.values() if career.is_visible_career if not career.is_course_slot)
if careers:
return careers
if target_sim_info.career_tracker.retirement is not None:
return (target_sim_info.career_tracker.retirement,)
else:
return (CareerUnemployment(target_sim_info),)
return ()
def get_known_careertrack_ids(self):
return (career_track.current_track_tuning.guid64 for career_track in self.get_known_careers())
def add_known_stat(self, stat, notify_client=True):
if self._known_stats is None:
self._known_stats = set()
self._known_stats.add(stat)
if notify_client:
self._rel_data.relationship.send_relationship_info()
def get_known_stats(self):
return self._known_stats
@property
def knows_major(self):
return self._knows_major
def add_knows_major(self, notify_client=True):
self._knows_major = True
if notify_client:
self._rel_data.relationship.send_relationship_info()
def remove_knows_major(self, notify_client=True):
self._knows_major = False
if notify_client:
self._rel_data.relationship.send_relationship_info()
def get_known_major(self):
if self._knows_major:
target_sim_info = self._rel_data.find_target_sim_info()
if target_sim_info is not None and target_sim_info.degree_tracker:
return target_sim_info.degree_tracker.get_major()
def get_known_major_career(self):
if self._knows_major:
target_sim_info = self._rel_data.find_target_sim_info()
if target_sim_info is not None and target_sim_info.career_tracker.has_career:
careers = tuple(career for career in target_sim_info.careers.values() if career.is_visible_career if career.is_course_slot)
if careers:
return careers
return ()
def get_save_data(self):
save_data = protocols.SimKnowledge()
for trait in self.known_traits:
save_data.trait_ids.append(trait.guid64)
save_data.knows_career = self._knows_career
if self._known_stats is not None:
for stat in self._known_stats:
save_data.stats.append(stat.guid64)
save_data.knows_major = self._knows_major
return save_data
def load_knowledge(self, save_data):
trait_manager = services.get_instance_manager(sims4.resources.Types.TRAIT)
stat_manager = services.get_instance_manager(sims4.resources.Types.STATISTIC)
for trait_inst_id in save_data.trait_ids:
trait = trait_manager.get(trait_inst_id)
if trait is not None:
if self._known_traits is None:
self._known_traits = set()
self._known_traits.add(trait)
for stat_id in save_data.stats:
if self._known_stats is None:
self._known_stats = set()
stat = stat_manager.get(stat_id)
if stat is not None:
self._known_stats.add(stat)
self._knows_career = save_data.knows_career
if hasattr(save_data, 'knows_major'):
self._knows_major = save_data.knows_major
| 2.390625 | 2 |
233_number_of_digt_one.py | gengwg/leetcode | 2 | 11140 | <gh_stars>1-10
# Given an integer n, count the total number of digit 1 appearing
# in all non-negative integers less than or equal to n.
#
# For example:
# Given n = 13,
# Return 6, because digit 1 occurred in the following numbers:
# 1, 10, 11, 12, 13.
#
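# Note: this brute-force solution checks every number from 1 to n, so it runs
# in O(n * number_of_digits) time, which is fine for small n such as 13.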
class Solution:
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
# sum all the '1's inside the n numbers
count = 0
for i in range(1, n+1): # count including n
count += self.numberOfDigitOne(i)
return count
def numberOfDigitOne(self, n):
"""
        count the number of '1' digits in a number n:
        take n mod 10 to test whether the last digit is 1,
        then integer-divide by 10 to drop that digit,
        and repeat until no digits remain.
"""
result = 0
while n:
if n % 10 == 1:
result += 1
            n = n // 10  # integer division keeps this correct on both Python 2 and 3
return result
if __name__ == "__main__":
    print(Solution().countDigitOne(13))
| 3.828125 | 4 |
Search Algorithms.py | fzehracetin/A-Star-and-Best-First-Search | 1 | 11141 | from PIL import Image
from math import sqrt
import numpy as np
import time
import matplotlib.backends.backend_tkagg
import matplotlib.pyplot as plt
class Point:
x: float
y: float
f: float
h: float
g: float
def __init__(self, x, y, f):
self.x = x
self.y = y
self.f = f
self.g = 0
self.h = 0
self.parent = None
def equal(self, other):
        return self.x == other.x and self.y == other.y
class Output:
result_image: Image
total_time: float
n_elements: int
max_elements: int
def __init__(self, result_image, total_time, n_elements, max_elements):
self.result_image = result_image
self.total_time = total_time
self.n_elements = n_elements
self.max_elements = max_elements
self.name = None
def plot_times(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.total_time, other1.total_time, other2.total_time, other3.total_time])
fig.suptitle("Toplam Zamanlar")
fname = image_name.split('.')
plt.savefig(fname[0] + "times.png")
plt.show()
def plot_n_elements(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.n_elements, other1.n_elements, other2.n_elements, other3.n_elements])
fig.suptitle("Stack'ten Çekilen Toplam Eleman Sayısı")
fname = image_name.split('.')
plt.savefig(fname[0] + "n_elements.png")
plt.show()
def plot_max_elements(self, other1, other2, other3):
fig, ax = plt.subplots()
ax.bar([self.name, other1.name, other2.name, other3.name],
[self.max_elements, other1.max_elements, other2.max_elements, other3.max_elements])
fig.suptitle("Stack'te Bulunan Maksimum Eleman Sayısı")
fname = image_name.split('.')
plt.savefig(fname[0] + "max_elements.png")
plt.show()
def distance(point, x, y):
return sqrt((point.x - x)**2 + (point.y - y)**2)
def insert_in_heap(heap, top, point):
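    # Binary min-heap "sift up": append the new point, then swap it with its
    # parent while its f value is smaller.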
heap.append(point)
i = top
parent = (i - 1)/2
while i >= 1 and heap[int(i)].f < heap[int(parent)].f:
heap[int(i)], heap[int(parent)] = heap[int(parent)], heap[int(i)] # swap
i = parent
parent = (i - 1) / 2
return
def calculate_weight(x, y, liste, top, point, visited, index1, index2):
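    # Heuristic: Euclidean distance to the goal scaled by (256 - red channel),
    # so darker pixels are treated as more expensive to cross.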
if visited[int(x)][int(y)] == 0:
r, g, b = image.getpixel((x, y))
if x == end.x and y == end.y:
print("Path found.")
        if r == 0:
r = 1
new_point = Point(x, y, 0)
new_point.parent = point
new_point.h = distance(end, x, y) * (256 - r)
new_point.g = 0
if index1 == 1: # a_star
new_point.g = new_point.parent.g + 256 - r
new_point.f = new_point.h + new_point.g # bfs'de g = 0
if index2 == 0: # stack
liste.append(new_point)
else: # heap
insert_in_heap(liste, top, new_point)
top += 1
visited[int(x)][int(y)] = 1
return top
def add_neighbours(point, liste, top, visited, index1, index2):
# print(point.x, point.y)
if (point.x == width - 1 and point.y == height - 1) or (point.x == 0 and point.y == 0) or \
(point.x == 0 and point.y == height - 1) or (point.x == width - 1 and point.y == 0):
# print("first if")
if point.x == width - 1 and point.y == height - 1:
constx = -1
consty = -1
elif point.x == 0 and point.y == 0:
constx = 1
consty = 1
elif point.x == width - 1 and point.y == 0:
constx = 1
consty = -1
else:
constx = -1
consty = 1
top = calculate_weight(point.x + constx, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + consty, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + constx, point.y + consty, liste, top, point, visited, index1, index2)
elif point.x == 0 or point.x == width - 1:
# print("nd if")
top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2)
if point.x == 0:
const = 1
else:
const = -1
top = calculate_weight(point.x + const, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + const, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + const, point.y, liste, top, point, visited, index1, index2)
elif point.y == 0 or point.y == height - 1:
# print("3rd if")
top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2)
if point.y == 0:
const = 1
else:
const = -1
top = calculate_weight(point.x - 1, point.y + const, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y + const, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + const, liste, top, point, visited, index1, index2)
else:
# print("4th if")
top = calculate_weight(point.x - 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x - 1, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x - 1, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y - 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x + 1, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y + 1, liste, top, point, visited, index1, index2)
top = calculate_weight(point.x, point.y - 1, liste, top, point, visited, index1, index2)
return top
def paint(point):
yol = []
while not point.equal(start):
yol.append(point)
image.putpixel((int(point.x), int(point.y)), (60, 255, 0))
point = point.parent
end_time = time.time()
# image.show()
    '''print("--------------PATH------------------")
for i in range(len(yol)):
print("x: {}, y:{}, distance:{}".format(yol[i].x, yol[i].y, yol[i].f))
print("------------------------------------")'''
return image, (end_time - start_time)
def bfs_and_a_star_with_stack(index):
stack = []
top = 0
found = False
point = None
stack.append(start)
visited = np.zeros((width, height))
visited[int(start.x)][int(start.y)] = 1
j = 0
max_element = 0
while stack and not found:
point = stack.pop(top)
# print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f))
top -= 1
if point.equal(end):
found = True
else:
top = add_neighbours(point, stack, top, visited, index, 0)
stack.sort(key=lambda point: point.f, reverse=True)
if len(stack) > max_element:
max_element = len(stack)
j += 1
if found:
result_image, total_time = paint(point)
# print("Stackten çekilen eleman sayısı: ", j)
# print("Stackteki maksimum eleman sayısı: ", max_element)
return result_image, total_time, j, max_element
def find_smallest_child(heap, i, top):
if 2 * i + 2 < top: # has two child
if heap[2*i + 1].f < heap[2*i + 2].f:
return 2*i + 1
else:
return 2*i + 2
elif 2*i + 1 < top: # has one child
return 2*i + 1
else: # has no child
return 0
def remove_min(heap, top):
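    # Binary min-heap "sift down": take the root (smallest f), move the last
    # element to the root, then swap it down with its smaller child.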
if top == 0:
return None
min_point = heap[0]
top -= 1
heap[0] = heap[top]
del heap[top]
i = 0
index = find_smallest_child(heap, i, top)
while index != 0 and heap[i].f > heap[index].f:
heap[i], heap[index] = heap[index], heap[i]
i = index
index = find_smallest_child(heap, i, top)
return min_point, top
def bfs_and_a_star_with_heap(index):
heap = []
found = False
yol = []
point = None
heap.append(start)
visited = np.zeros((width, height))
visited[int(start.x)][int(start.y)] = 1
j = 0
top = 1
max_element = 0
while heap and not found:
point, top = remove_min(heap, top)
# print("x: {}, y:{}, f:{}".format(point.x, point.y, point.f))
if point.equal(end):
found = True
else:
top = add_neighbours(point, heap, top, visited, index, 1)
if len(heap) > max_element:
max_element = len(heap)
j += 1
if found:
result_image, total_time = paint(point)
else:
return
return result_image, total_time, j, max_element
if __name__ == "__main__":
print("UYARI: Seçilecek görüntü exe dosyası ile aynı klasörde olmalıdır.")
image_name = input("Algoritmanın üzerinde çalışacağı görüntünün ismini giriniz (Örnek input: image.png): ")
print(image_name)
print("-------------------Algoritmalar------------------")
print("1- Best First Search with Stack")
print("2- Best First Search with Heap")
print("3- A* with Stack")
print("4- A* with Heap")
print("5- Analiz (tüm algoritmaların çalışmalarını ve kıyaslamalarını gör)")
alg = input("Algoritmayı ve veri yapısının numarasını seçiniz (Örnek input: 1): ")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
print("Görüntünün genişliği: {}, yüksekliği: {}".format(width, height))
print("NOT: Başlangıç ve bitiş noktasının koordinatları genişlik ve uzunluktan küçük olmalıdır.")
sx, sy = input("Başlangıç noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 350 100): ").split()
ex, ey = input("Bitiş noktasının x ve y piksel koordinatlarını sırasıyla giriniz (Örnek input: 200 700): ").split()
start = Point(int(sx), int(sy), -1)
start.parent = -1
end = Point(int(ex), int(ey), -1)
start_time = time.time()
if int(alg) == 1:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0)
elif int(alg) == 2:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0)
elif int(alg) == 3:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1)
elif int(alg) == 4:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1)
elif int(alg) == 5:
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(0)
output1 = Output(result_image, total_time, n_elements, max_elements)
print(n_elements, total_time, max_elements)
output1.name = "BFS with Stack"
print("1/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(0)
output2 = Output(result_image, total_time, n_elements, max_elements)
print(n_elements, total_time, max_elements)
output2.name = "BFS with Heap"
print("2/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_stack(1)
output3 = Output(result_image, total_time, n_elements, max_elements)
output3.name = "A* with Stack"
print(n_elements, total_time, max_elements)
print("3/4")
image = Image.open(image_name)
width, height = image.size
image = image.convert('RGB')
start_time = time.time()
result_image, total_time, n_elements, max_elements = bfs_and_a_star_with_heap(1)
output4 = Output(result_image, total_time, n_elements, max_elements)
output4.name = "A* with Heap"
print("4/4")
output1.plot_times(output2, output3, output4)
output1.plot_max_elements(output2, output3, output4)
output1.plot_n_elements(output2, output3, output4)
print("Bastırılan görüntüler sırasıyla BFS stack, BFS heap, A* stack ve A* heap şeklindedir.")
fname = image_name.split('.')
output1.result_image.show()
output1.result_image.save(fname[0] + "BFS_stack.png")
output2.result_image.show()
output2.result_image.save(fname[0] + "BFS_heap.png")
output3.result_image.show()
output3.result_image.save(fname[0] + "A_star_stack.png")
output4.result_image.show()
output4.result_image.save(fname[0] + "A_star_heap.png")
exit(0)
else:
print("Algoritma numarası hatalı girildi, tekrar deneyin.")
exit(0)
print("Stackten çekilen eleman sayısı: ", n_elements)
print("Stackteki maksimum eleman sayısı: ", max_elements)
print("Toplam süre: ", total_time)
result_image.show()
| 3.1875 | 3 |
time_test.py | Shb742/rnnoise_python | 32 | 11142 | #Author <NAME>
import time
import rnnoise
import numpy as np
def time_rnnoise(rounds=1000):
a = rnnoise.RNNoise()
timer = 0.0
st = time.time()
for i in range(rounds):
inp = np.random.bytes(960)
timer = (time.time() - st)
print(timer)
st = time.time()
for i in range(rounds):
inp = np.random.bytes(960)
va,out = a.process_frame(inp)
time_taken_per_frame = ((time.time()-st)-timer) /rounds
print("time taken for one frame - " + str(time_taken_per_frame ))
print("time in a frame - " +str(480.0/48000.0))
print(str((480.0/48000.0)/time_taken_per_frame )+"X faster than real")
a.destroy()
time_rnnoise() | 2.875 | 3 |
tests/test_shell.py | jakubtyniecki/pact | 2 | 11143 |
""" shell sort tests module """
import unittest
import random
from sort import shell
from tests import helper
class ShellSortTests(unittest.TestCase):
""" shell sort unit tests class """
max = 100
arr = []
def setUp(self):
""" setting up for the test """
self.arr = random.sample(range(self.max), self.max)
def test_null_input(self):
""" should raise when input array is None """
# arrange
inp = None
# act
with self.assertRaises(TypeError) as ex:
shell.sort(inp)
# assert
self.assertEqual("'NoneType' object is not iterable", str(ex.exception))
def test_empty_input(self):
""" should return [] when input array is empty """
# arrange
inp = []
# act
res = shell.sort(inp)
# assert
self.assertEqual(len(inp), len(res))
def test_sort_a_given_array(self):
""" should sort a given array """
# act
res = shell.sort(self.arr[:])
# assert
self.assertTrue(helper.is_sorted(res))
| 3.625 | 4 |
k8s_apps/admin/dump_inventory_file.py | AkadioInc/firefly | 0 | 11144 | <filename>k8s_apps/admin/dump_inventory_file.py<gh_stars>0
import h5pyd
from datetime import datetime
import tzlocal
BUCKET="firefly-hsds"
inventory_domain = "/FIREfly/inventory.h5"
def formatTime(timestamp):
local_timezone = tzlocal.get_localzone() # get pytz timezone
local_time = datetime.fromtimestamp(timestamp, local_timezone)
return local_time
f = h5pyd.File(inventory_domain, "r", bucket=BUCKET)
table = f["inventory"]
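# Each row appears to hold (filename, start timestamp, stop timestamp);
# a missing timestamp is printed as 0 below.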
for row in table:
filename = row[0].decode('utf-8')
if row[1]:
start = formatTime(row[1])
else:
start = 0
if row[2]:
stop = formatTime(row[2])
else:
stop = 0
print(f"{filename}\t{start}\t{stop}")
print(f"{table.nrows} rows")
| 2.484375 | 2 |
enaml/qt/qt_timer.py | xtuzy/enaml | 1,080 | 11145 | <reponame>xtuzy/enaml
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.timer import ProxyTimer
from .QtCore import QTimer
from .qt_toolkit_object import QtToolkitObject
class QtTimer(QtToolkitObject, ProxyTimer):
""" A Qt implementation of an Enaml ProxyTimer.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QTimer)
#--------------------------------------------------------------------------
# Initialization
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying timer object.
"""
self.widget = QTimer()
def init_widget(self):
""" Initialize the widget.
"""
super(QtTimer, self).init_widget()
d = self.declaration
self.set_interval(d.interval)
self.set_single_shot(d.single_shot)
self.widget.timeout.connect(self.on_timeout)
def destroy(self):
""" A reimplemented destructor.
This stops the timer before invoking the superclass destructor.
"""
self.widget.stop()
super(QtTimer, self).destroy()
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
def on_timeout(self):
""" Handle the timeout signal for the timer.
"""
d = self.declaration
if d is not None:
d.timeout()
#--------------------------------------------------------------------------
# ProxyTimer API
#--------------------------------------------------------------------------
def set_interval(self, interval):
""" Set the interval on the timer.
"""
self.widget.setInterval(interval)
def set_single_shot(self, single_shot):
""" Set the single shot flag on the timer.
"""
self.widget.setSingleShot(single_shot)
def start(self):
""" Start or restart the timer.
"""
self.widget.start()
def stop(self):
""" Stop the timer.
"""
self.widget.stop()
def is_running(self):
""" Get whether or not the timer is running.
"""
return self.widget.isActive()
| 1.882813 | 2 |
mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/test_mpesa_settings.py | Mindhome/field_service | 1 | 11146 | <filename>mindhome_alpha/erpnext/erpnext_integrations/doctype/mpesa_settings/test_mpesa_settings.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
from json import dumps
import frappe
import unittest
from erpnext.erpnext_integrations.doctype.mpesa_settings.mpesa_settings import process_balance_info, verify_transaction
from erpnext.accounts.doctype.pos_invoice.test_pos_invoice import create_pos_invoice
class TestMpesaSettings(unittest.TestCase):
def tearDown(self):
frappe.db.sql('delete from `tabMpesa Settings`')
frappe.db.sql('delete from `tabIntegration Request` where integration_request_service = "Mpesa"')
def test_creation_of_payment_gateway(self):
create_mpesa_settings(payment_gateway_name="_Test")
mode_of_payment = frappe.get_doc("Mode of Payment", "Mpesa-_Test")
self.assertTrue(frappe.db.exists("Payment Gateway Account", {'payment_gateway': "Mpesa-_Test"}))
self.assertTrue(mode_of_payment.name)
self.assertEquals(mode_of_payment.type, "Phone")
def test_processing_of_account_balance(self):
mpesa_doc = create_mpesa_settings(payment_gateway_name="_Account Balance")
mpesa_doc.get_account_balance_info()
callback_response = get_account_balance_callback_payload()
process_balance_info(**callback_response)
integration_request = frappe.get_doc("Integration Request", "AG_20200927_00007cdb1f9fb6494315")
# test integration request creation and successful update of the status on receiving callback response
self.assertTrue(integration_request)
self.assertEquals(integration_request.status, "Completed")
# test formatting of account balance received as string to json with appropriate currency symbol
mpesa_doc.reload()
self.assertEquals(mpesa_doc.account_balance, dumps({
"Working Account": {
"current_balance": "Sh 481,000.00",
"available_balance": "Sh 481,000.00",
"reserved_balance": "Sh 0.00",
"uncleared_balance": "Sh 0.00"
}
}))
integration_request.delete()
def test_processing_of_callback_payload(self):
create_mpesa_settings(payment_gateway_name="Payment")
mpesa_account = frappe.db.get_value("Payment Gateway Account", {"payment_gateway": 'Mpesa-Payment'}, "payment_account")
frappe.db.set_value("Account", mpesa_account, "account_currency", "KES")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "KES")
pos_invoice = create_pos_invoice(do_not_submit=1)
pos_invoice.append("payments", {'mode_of_payment': 'Mpesa-Payment', 'account': mpesa_account, 'amount': 500})
pos_invoice.contact_mobile = "093456543894"
pos_invoice.currency = "KES"
pos_invoice.save()
pr = pos_invoice.create_payment_request()
# test payment request creation
self.assertEquals(pr.payment_gateway, "Mpesa-Payment")
# submitting payment request creates integration requests with random id
integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
}, pluck="name")
callback_response = get_payment_callback_payload(Amount=500, CheckoutRequestID=integration_req_ids[0])
verify_transaction(**callback_response)
# test creation of integration request
integration_request = frappe.get_doc("Integration Request", integration_req_ids[0])
# test integration request creation and successful update of the status on receiving callback response
self.assertTrue(integration_request)
self.assertEquals(integration_request.status, "Completed")
pos_invoice.reload()
integration_request.reload()
self.assertEquals(pos_invoice.mpesa_receipt_number, "LGR7OWQX0R")
self.assertEquals(integration_request.status, "Completed")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "")
integration_request.delete()
pr.reload()
pr.cancel()
pr.delete()
pos_invoice.delete()
def test_processing_of_multiple_callback_payload(self):
create_mpesa_settings(payment_gateway_name="Payment")
mpesa_account = frappe.db.get_value("Payment Gateway Account", {"payment_gateway": 'Mpesa-Payment'}, "payment_account")
frappe.db.set_value("Account", mpesa_account, "account_currency", "KES")
frappe.db.set_value("Mpesa Settings", "Payment", "transaction_limit", "500")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "KES")
pos_invoice = create_pos_invoice(do_not_submit=1)
pos_invoice.append("payments", {'mode_of_payment': 'Mpesa-Payment', 'account': mpesa_account, 'amount': 1000})
pos_invoice.contact_mobile = "093456543894"
pos_invoice.currency = "KES"
pos_invoice.save()
pr = pos_invoice.create_payment_request()
# test payment request creation
self.assertEquals(pr.payment_gateway, "Mpesa-Payment")
# submitting payment request creates integration requests with random id
integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
}, pluck="name")
# create random receipt nos and send it as response to callback handler
mpesa_receipt_numbers = [frappe.utils.random_string(5) for d in integration_req_ids]
integration_requests = []
for i in range(len(integration_req_ids)):
callback_response = get_payment_callback_payload(
Amount=500,
CheckoutRequestID=integration_req_ids[i],
MpesaReceiptNumber=mpesa_receipt_numbers[i]
)
# handle response manually
verify_transaction(**callback_response)
# test completion of integration request
integration_request = frappe.get_doc("Integration Request", integration_req_ids[i])
self.assertEquals(integration_request.status, "Completed")
integration_requests.append(integration_request)
# check receipt number once all the integration requests are completed
pos_invoice.reload()
self.assertEquals(pos_invoice.mpesa_receipt_number, ', '.join(mpesa_receipt_numbers))
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "")
[d.delete() for d in integration_requests]
pr.reload()
pr.cancel()
pr.delete()
pos_invoice.delete()
def test_processing_of_only_one_succes_callback_payload(self):
create_mpesa_settings(payment_gateway_name="Payment")
mpesa_account = frappe.db.get_value("Payment Gateway Account", {"payment_gateway": 'Mpesa-Payment'}, "payment_account")
frappe.db.set_value("Account", mpesa_account, "account_currency", "KES")
frappe.db.set_value("Mpesa Settings", "Payment", "transaction_limit", "500")
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "KES")
pos_invoice = create_pos_invoice(do_not_submit=1)
pos_invoice.append("payments", {'mode_of_payment': 'Mpesa-Payment', 'account': mpesa_account, 'amount': 1000})
pos_invoice.contact_mobile = "093456543894"
pos_invoice.currency = "KES"
pos_invoice.save()
pr = pos_invoice.create_payment_request()
# test payment request creation
self.assertEquals(pr.payment_gateway, "Mpesa-Payment")
# submitting payment request creates integration requests with random id
integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
}, pluck="name")
# create random receipt nos and send it as response to callback handler
mpesa_receipt_numbers = [frappe.utils.random_string(5) for d in integration_req_ids]
callback_response = get_payment_callback_payload(
Amount=500,
CheckoutRequestID=integration_req_ids[0],
MpesaReceiptNumber=mpesa_receipt_numbers[0]
)
# handle response manually
verify_transaction(**callback_response)
# test completion of integration request
integration_request = frappe.get_doc("Integration Request", integration_req_ids[0])
self.assertEquals(integration_request.status, "Completed")
# now one request is completed
# second integration request fails
# now retrying payment request should make only one integration request again
pr = pos_invoice.create_payment_request()
new_integration_req_ids = frappe.get_all("Integration Request", filters={
'reference_doctype': pr.doctype,
'reference_docname': pr.name,
'name': ['not in', integration_req_ids]
}, pluck="name")
self.assertEquals(len(new_integration_req_ids), 1)
frappe.db.set_value("Customer", "_Test Customer", "default_currency", "")
frappe.db.sql("delete from `tabIntegration Request` where integration_request_service = 'Mpesa'")
pr.reload()
pr.cancel()
pr.delete()
pos_invoice.delete()
def create_mpesa_settings(payment_gateway_name="Express"):
if frappe.db.exists("Mpesa Settings", payment_gateway_name):
return frappe.get_doc("Mpesa Settings", payment_gateway_name)
doc = frappe.get_doc(dict( #nosec
doctype="Mpesa Settings",
payment_gateway_name=payment_gateway_name,
consumer_key="<KEY>",
consumer_secret="<KEY>",
online_passkey="L<KEY>",
till_number="174379"
))
doc.insert(ignore_permissions=True)
return doc
def get_test_account_balance_response():
"""Response received after calling the account balance API."""
return {
"ResultType":0,
"ResultCode":0,
"ResultDesc":"The service request has been accepted successfully.",
"OriginatorConversationID":"10816-694520-2",
"ConversationID":"AG_20200927_00007cdb1f9fb6494315",
"TransactionID":"LGR0000000",
"ResultParameters":{
"ResultParameter":[
{
"Key":"ReceiptNo",
"Value":"LGR919G2AV"
},
{
"Key":"Conversation ID",
"Value":"AG_20170727_00004492b1b6d0078fbe"
},
{
"Key":"FinalisedTime",
"Value":20170727101415
},
{
"Key":"Amount",
"Value":10
},
{
"Key":"TransactionStatus",
"Value":"Completed"
},
{
"Key":"ReasonType",
"Value":"Salary Payment via API"
},
{
"Key":"TransactionReason"
},
{
"Key":"DebitPartyCharges",
"Value":"Fee For B2C Payment|KES|33.00"
},
{
"Key":"DebitAccountType",
"Value":"Utility Account"
},
{
"Key":"InitiatedTime",
"Value":20170727101415
},
{
"Key":"Originator Conversation ID",
"Value":"19455-773836-1"
},
{
"Key":"CreditPartyName",
"Value":"254708374149 - <NAME>"
},
{
"Key":"DebitPartyName",
"Value":"600134 - Safaricom157"
}
]
},
"ReferenceData":{
"ReferenceItem":{
"Key":"Occasion",
"Value":"aaaa"
}
}
}
def get_payment_request_response_payload(Amount=500):
"""Response received after successfully calling the stk push process request API."""
CheckoutRequestID = frappe.utils.random_string(10)
return {
"MerchantRequestID": "8071-27184008-1",
"CheckoutRequestID": CheckoutRequestID,
"ResultCode": 0,
"ResultDesc": "The service request is processed successfully.",
"CallbackMetadata": {
"Item": [
{ "Name": "Amount", "Value": Amount },
{ "Name": "MpesaReceiptNumber", "Value": "LGR7OWQX0R" },
{ "Name": "TransactionDate", "Value": 20201006113336 },
{ "Name": "PhoneNumber", "Value": 254723575670 }
]
}
}
def get_payment_callback_payload(Amount=500, CheckoutRequestID="ws_CO_061020201133231972", MpesaReceiptNumber="LGR7OWQX0R"):
"""Response received from the server as callback after calling the stkpush process request API."""
return {
"Body":{
"stkCallback":{
"MerchantRequestID":"19465-780693-1",
"CheckoutRequestID":CheckoutRequestID,
"ResultCode":0,
"ResultDesc":"The service request is processed successfully.",
"CallbackMetadata":{
"Item":[
{ "Name":"Amount", "Value":Amount },
{ "Name":"MpesaReceiptNumber", "Value":MpesaReceiptNumber },
{ "Name":"Balance" },
{ "Name":"TransactionDate", "Value":20170727154800 },
{ "Name":"PhoneNumber", "Value":254721566839 }
]
}
}
}
}
def get_account_balance_callback_payload():
"""Response received from the server as callback after calling the account balance API."""
return {
"Result":{
"ResultType": 0,
"ResultCode": 0,
"ResultDesc": "The service request is processed successfully.",
"OriginatorConversationID": "16470-170099139-1",
"ConversationID": "AG_20200927_00007cdb1f9fb6494315",
"TransactionID": "OIR0000000",
"ResultParameters": {
"ResultParameter": [
{
"Key": "AccountBalance",
"Value": "Working Account|KES|481000.00|481000.00|0.00|0.00"
},
{ "Key": "BOCompletedTime", "Value": 20200927234123 }
]
},
"ReferenceData": {
"ReferenceItem": {
"Key": "QueueTimeoutURL",
"Value": "https://internalsandbox.safaricom.co.ke/mpesa/abresults/v1/submit"
}
}
}
} | 1.71875 | 2 |
b2accessdeprovisioning/configparser.py | EUDAT-B2ACCESS/b2access-deprovisioning-report | 0 | 11147 | <reponame>EUDAT-B2ACCESS/b2access-deprovisioning-report<filename>b2accessdeprovisioning/configparser.py
from __future__ import absolute_import
import yaml
with open("config.yml", "r") as f:
config = yaml.load(f)
| 1.679688 | 2 |
res_mods/mods/packages/xvm_main/python/vehinfo_tiers.py | peterbartha/ImmunoMod | 0 | 11148 | <reponame>peterbartha/ImmunoMod<filename>res_mods/mods/packages/xvm_main/python/vehinfo_tiers.py
""" XVM (c) www.modxvm.com 2013-2017 """
# PUBLIC
def getTiers(level, cls, key):
return _getTiers(level, cls, key)
# PRIVATE
from logger import *
from gui.shared.utils.requesters import REQ_CRITERIA
from helpers import dependency
from skeletons.gui.shared import IItemsCache
_special = {
# Data from http://forum.worldoftanks.ru/index.php?/topic/41221-
# Last update: 23.05.2017
# level 2
'germany:G53_PzI': [ 2, 2 ],
'uk:GB76_Mk_VIC': [ 2, 2 ],
'usa:A19_T2_lt': [ 2, 4 ],
'usa:A93_T7_Combat_Car': [ 2, 2 ],
# level 3
'germany:G36_PzII_J': [ 3, 4 ],
'japan:J05_Ke_Ni_B': [ 3, 4 ],
'ussr:R34_BT-SV': [ 3, 4 ],
'ussr:R50_SU76I': [ 3, 4 ],
'ussr:R56_T-127': [ 3, 4 ],
'ussr:R67_M3_LL': [ 3, 4 ],
'ussr:R86_LTP': [ 3, 4 ],
# level 4
'france:F14_AMX40': [ 4, 6 ],
'germany:G35_B-1bis_captured': [ 4, 4 ],
'japan:J06_Ke_Ho': [ 4, 6 ],
'uk:GB04_Valentine': [ 4, 6 ],
'uk:GB60_Covenanter': [ 4, 6 ],
'ussr:R12_A-20': [ 4, 6 ],
'ussr:R31_Valentine_LL': [ 4, 4 ],
'ussr:R44_T80': [ 4, 6 ],
'ussr:R68_A-32': [ 4, 5 ],
# level 5
'germany:G104_Stug_IV': [ 5, 6 ],
'germany:G32_PzV_PzIV': [ 5, 6 ],
'germany:G32_PzV_PzIV_ausf_Alfa': [ 5, 6 ],
'germany:G70_PzIV_Hydro': [ 5, 6 ],
'uk:GB20_Crusader': [ 5, 7 ],
'uk:GB51_Excelsior': [ 5, 6 ],
'uk:GB68_Matilda_Black_Prince': [ 5, 6 ],
'usa:A21_T14': [ 5, 6 ],
'usa:A44_M4A2E4': [ 5, 6 ],
'ussr:R32_Matilda_II_LL': [ 5, 6 ],
'ussr:R33_Churchill_LL': [ 5, 6 ],
'ussr:R38_KV-220': [ 5, 6 ],
'ussr:R38_KV-220_beta': [ 5, 6 ],
'ussr:R78_SU_85I': [ 5, 6 ],
# level 6
'germany:G32_PzV_PzIV_CN': [ 6, 7 ],
'germany:G32_PzV_PzIV_ausf_Alfa_CN': [ 6, 7 ],
'uk:GB63_TOG_II': [ 6, 7 ],
# level 7
'germany:G48_E-25': [ 7, 8 ],
'germany:G78_Panther_M10': [ 7, 8 ],
'uk:GB71_AT_15A': [ 7, 8 ],
'usa:A86_T23E3': [ 7, 8 ],
'ussr:R98_T44_85': [ 7, 8 ],
'ussr:R99_T44_122': [ 7, 8 ],
# level 8
'china:Ch01_Type59': [ 8, 9 ],
'china:Ch03_WZ-111': [ 8, 9 ],
'china:Ch14_T34_3': [ 8, 9 ],
'china:Ch23_112': [ 8, 9 ],
'france:F65_FCM_50t': [ 8, 9 ],
'germany:G65_JagdTiger_SdKfz_185': [ 8, 9 ],
'usa:A45_M6A2E1': [ 8, 9 ],
'usa:A80_T26_E4_SuperPershing': [ 8, 9 ],
'ussr:R54_KV-5': [ 8, 9 ],
'ussr:R61_Object252': [ 8, 9 ],
'ussr:R61_Object252_BF': [ 8, 9 ],
}
def _getTiers(level, cls, key):
if key in _special:
return _special[key]
# HT: (=T4 max+1)
if level == 4 and cls == 'heavyTank':
return (4, 5)
# default: (<T3 max+1) & (>=T3 max+2) & (>T9 max=11)
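    # e.g. level 2 -> (2, 3), level 5 -> (5, 7), level 10 -> (10, 11)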
return (level, level + 1 if level < 3 else 11 if level > 9 else level + 2)
def _test_specials():
for veh_name in _special.keys():
itemsCache = dependency.instance(IItemsCache)
if not itemsCache.items.getVehicles(REQ_CRITERIA.VEHICLE.SPECIFIC_BY_NAME(veh_name)):
warn('vehinfo_tiers: vehicle %s declared in _special does not exist!' % veh_name)
| 2.015625 | 2 |
pypy/module/cpyext/test/test_pystrtod.py | m4sterchain/mesapy | 381 | 11149 | <filename>pypy/module/cpyext/test/test_pystrtod.py
import math
from pypy.module.cpyext import pystrtod
from pypy.module.cpyext.test.test_api import BaseApiTest, raises_w
from rpython.rtyper.lltypesystem import rffi
from rpython.rtyper.lltypesystem import lltype
from pypy.module.cpyext.pystrtod import PyOS_string_to_double
class TestPyOS_string_to_double(BaseApiTest):
def test_simple_float(self, space):
s = rffi.str2charp('0.4')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert r == 0.4
rffi.free_charp(s)
def test_empty_string(self, space):
s = rffi.str2charp('')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, None)
rffi.free_charp(s)
def test_bad_string(self, space):
s = rffi.str2charp(' 0.4')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, None)
rffi.free_charp(s)
def test_overflow_pos(self, space):
s = rffi.str2charp('1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert math.isinf(r)
assert r > 0
rffi.free_charp(s)
def test_overflow_neg(self, space):
s = rffi.str2charp('-1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
r = PyOS_string_to_double(space, s, null, None)
assert math.isinf(r)
assert r < 0
rffi.free_charp(s)
def test_overflow_exc(self, space):
s = rffi.str2charp('1e500')
null = lltype.nullptr(rffi.CCHARPP.TO)
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, null, space.w_ValueError)
rffi.free_charp(s)
def test_endptr_number(self, space):
s = rffi.str2charp('0.4')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
r = PyOS_string_to_double(space, s, endp, None)
assert r == 0.4
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr + 3
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
def test_endptr_tail(self, space):
s = rffi.str2charp('0.4 foo')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
r = PyOS_string_to_double(space, s, endp, None)
assert r == 0.4
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr + 3
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
def test_endptr_no_conversion(self, space):
s = rffi.str2charp('foo')
endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
with raises_w(space, ValueError):
PyOS_string_to_double(space, s, endp, None)
endp_addr = rffi.cast(rffi.LONG, endp[0])
s_addr = rffi.cast(rffi.LONG, s)
assert endp_addr == s_addr
rffi.free_charp(s)
lltype.free(endp, flavor='raw')
class TestPyOS_double_to_string(BaseApiTest):
def test_format_code(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(150.0, 'e', 1, 0, ptype)
assert '1.5e+02' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_precision(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(3.14159269397, 'g', 5, 0, ptype)
assert '3.1416' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_sign(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(-3.14, 'g', 3, 1, ptype)
assert '-3.14' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_add_dot_0(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(3, 'g', 5, 2, ptype)
assert '3.0' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_flags_alt(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(314., 'g', 3, 4, ptype)
assert '314.' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_FINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_nan(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(float('nan'), 'g', 3, 4, ptype)
assert 'nan' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_NAN == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_infinity(self, api):
ptype = lltype.malloc(rffi.INTP.TO, 1, flavor='raw')
r = api.PyOS_double_to_string(1e200 * 1e200, 'g', 0, 0, ptype)
assert 'inf' == rffi.charp2str(r)
type_value = rffi.cast(lltype.Signed, ptype[0])
assert pystrtod.Py_DTST_INFINITE == type_value
rffi.free_charp(r)
lltype.free(ptype, flavor='raw')
def test_ptype_null(self, api):
ptype = lltype.nullptr(rffi.INTP.TO)
r = api.PyOS_double_to_string(3.14, 'g', 3, 0, ptype)
assert '3.14' == rffi.charp2str(r)
assert ptype == lltype.nullptr(rffi.INTP.TO)
rffi.free_charp(r)
| 2.203125 | 2 |
mathics/core/systemsymbols.py | Mathics3/mathics-core | 90 | 11150 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from mathics.core.symbols import Symbol
# Some other common Symbols. This list is sorted in alphabetic order.
SymbolAssumptions = Symbol("$Assumptions")
SymbolAborted = Symbol("$Aborted")
SymbolAll = Symbol("All")
SymbolAlternatives = Symbol("Alternatives")
SymbolAnd = Symbol("And")
SymbolAppend = Symbol("Append")
SymbolApply = Symbol("Apply")
SymbolAssociation = Symbol("Association")
SymbolAutomatic = Symbol("Automatic")
SymbolBlank = Symbol("Blank")
SymbolBlend = Symbol("Blend")
SymbolByteArray = Symbol("ByteArray")
SymbolCatalan = Symbol("Catalan")
SymbolColorData = Symbol("ColorData")
SymbolComplex = Symbol("Complex")
SymbolComplexInfinity = Symbol("ComplexInfinity")
SymbolCondition = Symbol("Condition")
SymbolConditionalExpression = Symbol("ConditionalExpression")
Symbol_Context = Symbol("$Context")
Symbol_ContextPath = Symbol("$ContextPath")
SymbolCos = Symbol("Cos")
SymbolD = Symbol("D")
SymbolDerivative = Symbol("Derivative")
SymbolDirectedInfinity = Symbol("DirectedInfinity")
SymbolDispatch = Symbol("Dispatch")
SymbolE = Symbol("E")
SymbolEdgeForm = Symbol("EdgeForm")
SymbolEqual = Symbol("Equal")
SymbolExpandAll = Symbol("ExpandAll")
SymbolEulerGamma = Symbol("EulerGamma")
SymbolFailed = Symbol("$Failed")
SymbolFunction = Symbol("Function")
SymbolGamma = Symbol("Gamma")
SymbolGet = Symbol("Get")
SymbolGoldenRatio = Symbol("GoldenRatio")
SymbolGraphics = Symbol("Graphics")
SymbolGreater = Symbol("Greater")
SymbolGreaterEqual = Symbol("GreaterEqual")
SymbolGrid = Symbol("Grid")
SymbolHoldForm = Symbol("HoldForm")
SymbolIndeterminate = Symbol("Indeterminate")
SymbolImplies = Symbol("Implies")
SymbolInfinity = Symbol("Infinity")
SymbolInfix = Symbol("Infix")
SymbolInteger = Symbol("Integer")
SymbolIntegrate = Symbol("Integrate")
SymbolLeft = Symbol("Left")
SymbolLess = Symbol("Less")
SymbolLessEqual = Symbol("LessEqual")
SymbolLog = Symbol("Log")
SymbolMachinePrecision = Symbol("MachinePrecision")
SymbolMakeBoxes = Symbol("MakeBoxes")
SymbolMessageName = Symbol("MessageName")
SymbolMinus = Symbol("Minus")
SymbolMap = Symbol("Map")
SymbolMatrixPower = Symbol("MatrixPower")
SymbolMaxPrecision = Symbol("$MaxPrecision")
SymbolMemberQ = Symbol("MemberQ")
SymbolMinus = Symbol("Minus")
SymbolN = Symbol("N")
SymbolNeeds = Symbol("Needs")
SymbolNIntegrate = Symbol("NIntegrate")
SymbolNone = Symbol("None")
SymbolNot = Symbol("Not")
SymbolNull = Symbol("Null")
SymbolNumberQ = Symbol("NumberQ")
SymbolNumericQ = Symbol("NumericQ")
SymbolOptionValue = Symbol("OptionValue")
SymbolOr = Symbol("Or")
SymbolOverflow = Symbol("Overflow")
SymbolPackages = Symbol("$Packages")
SymbolPattern = Symbol("Pattern")
SymbolPi = Symbol("Pi")
SymbolPiecewise = Symbol("Piecewise")
SymbolPoint = Symbol("Point")
SymbolPossibleZeroQ = Symbol("PossibleZeroQ")
SymbolQuiet = Symbol("Quiet")
SymbolRational = Symbol("Rational")
SymbolReal = Symbol("Real")
SymbolRow = Symbol("Row")
SymbolRowBox = Symbol("RowBox")
SymbolRGBColor = Symbol("RGBColor")
SymbolSuperscriptBox = Symbol("SuperscriptBox")
SymbolRule = Symbol("Rule")
SymbolRuleDelayed = Symbol("RuleDelayed")
SymbolSequence = Symbol("Sequence")
SymbolSeries = Symbol("Series")
SymbolSeriesData = Symbol("SeriesData")
SymbolSet = Symbol("Set")
SymbolSimplify = Symbol("Simplify")
SymbolSin = Symbol("Sin")
SymbolSlot = Symbol("Slot")
SymbolStringQ = Symbol("StringQ")
SymbolStyle = Symbol("Style")
SymbolTable = Symbol("Table")
SymbolToString = Symbol("ToString")
SymbolUndefined = Symbol("Undefined")
SymbolXor = Symbol("Xor")
| 2.296875 | 2 |
exercises/ali/cartpole-MCTS/cartpole.py | alik604/ra | 0 | 11151 | # from https://github.com/kvwoerden/mcts-cartpole
# ---------------------------------------------------------------------------- #
# Imports #
# ---------------------------------------------------------------------------- #
import os
import time
import random
import argparse
from types import SimpleNamespace
import gym
from gym import logger
from gym.wrappers.monitoring.video_recorder import VideoRecorder
from Simple_mcts import MCTSAgent
from Agent import dqn_agent
# ---------------------------------------------------------------------------- #
# Constants #
# ---------------------------------------------------------------------------- #
LOGGER_LEVEL = logger.WARN
args = dict()
args['env_name'] = 'CartPole-v0'
args['episodes'] = 10
args['seed'] = 28
args['iteration_budget'] = 8000 # The number of iterations for each search step. Increasing this should lead to better performance.
args['lookahead_target'] = 10000 # The target number of steps the agent aims to look forward.
args['max_episode_steps'] = 1500 # The maximum number of steps to play.
args['video_basepath'] = '.\\video' # './video'
args['start_cp'] = 20 # The start value of C_p, the value that the agent changes to try to achieve the lookahead target. Decreasing this makes the search tree deeper, increasing this makes the search tree wider.
args = SimpleNamespace(**args)
# ---------------------------------------------------------------------------- #
# Main loop #
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
logger.set_level(LOGGER_LEVEL)
random.seed(args.seed)
env = gym.make(args.env_name)
env.seed(args.seed)
Q_net = dqn_agent()
agent = MCTSAgent(args.iteration_budget, env, Q_net)
timestr = time.strftime("%Y%m%d-%H%M%S")
reward = 0
done = False
for i in range(args.episodes):
ob = env.reset()
env._max_episode_steps = args.max_episode_steps
video_path = os.path.join(
args.video_basepath, f"output_{timestr}_{i}.mp4")
        # rec = VideoRecorder(env, path=video_path)
try:
sum_reward = 0
node = None
all_nodes = []
C_p = args.start_cp
while True:
print("################")
env.render()
                # rec.capture_frame()
action, node, C_p = agent.act(env.state, n_actions=env.action_space.n, node=node, C_p=C_p, lookahead_target=args.lookahead_target)
ob, reward, done, _ = env.step(action)
print("### observed state: ", ob)
sum_reward += reward
print("### sum_reward: ", sum_reward)
if done:
# rec.close()
break
except KeyboardInterrupt as e:
# rec.close()
env.close()
raise e
env.close()
| 1.984375 | 2 |
wildlifecompliance/components/applications/cron.py | preranaandure/wildlifecompliance | 1 | 11152 | from django_cron import CronJobBase, Schedule
class VerifyLicenceSpeciesJob(CronJobBase):
"""
Verifies LicenceSpecies against TSC server.
"""
RUN_AT_TIMES = ['00:00']
schedule = Schedule(run_at_times=RUN_AT_TIMES)
code = 'applications.verify_licence_species'
def do(self):
pass
| 2.046875 | 2 |
optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | 0 | 11153 | import ASV
from simpletal import simpleTAL, simpleTALES
try:
import logging
except:
import InfoLogging as logging
import codecs
class ColumnSorter:
def __init__ (self, columnList):
self.columnList = columnList
self.log = logging.getLogger ('ColumnSorter')
def setup (self, fieldNames):
mapList = []
for columnName, translationMap in self.columnList:
try:
colNum = fieldNames.index (columnName)
mapList.append ((colNum, translationMap))
except ValueError, e:
self.log.error ("No such column name as %s" % name)
raise e
self.mapList = mapList
def sort (self, row1, row2):
result = 0
for colNum, map in self.mapList:
result = self.doSort (row1, row2, colNum, map)
if (result != 0):
return result
return result
def doSort (self, row1, row2, colNum, map):
if (map is None):
col1 = row1[colNum]
col2 = row2[colNum]
else:
try:
col1 = map [row1[colNum]]
except KeyError, e:
self.log.warn ("No key found for key %s - assuming low value" % row1[colNum])
return -1
try:
col2 = map [row2[colNum]]
except KeyError, e:
self.log.warn ("No key found for key %s - assuming low value" % row1[colNum])
return 1
if (col1 < col2):
return -1
if (col1 == col2):
return 0
if (col1 > col2):
return 1
class CsvContextCreator:
def __init__ (self, fileName, fileCodec):
self.log = logging.getLogger ("CSVTemplate.CsvContextCreator")
self.csvData = ASV.ASV()
self.csvData.input_from_file(fileName, ASV.CSV(), has_field_names = 1)
self.fieldNames = self.csvData.get_field_names()
self.conv = fileCodec
def getContextMap (self, sorter=None):
orderList = []
for row in self.csvData:
orderList.append (row)
if (sorter is not None):
sorter.setup (self.fieldNames)
try:
orderList.sort (sorter.sort)
except Exception, e:
self.log.error ("Exception occured executing sorter: " + str (e))
raise e
contextList = []
for row in orderList:
rowMap = {}
colCount = 0
for col in row:
if (col != ""):
rowMap[self.fieldNames[colCount]] = self.conv(col)[0]
colCount += 1
contextList.append (rowMap)
return contextList
def getRawData (self):
return unicode (self.csvData)
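# Example usage (hypothetical file and column names):
#   creator = CsvContextCreator ('people.csv', codecs.getdecoder ('utf-8'))
#   rows = creator.getContextMap (ColumnSorter ([('Priority', {'low': 0, 'high': 1})]))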
class CSVTemplateExpander:
def __init__ (self, sourceFile, name="csvList"):
self.contextFactory = CsvContextCreator (sourceFile)
self.name = name
self.template=None
def expandTemplate (self, templateName, outputName, additionalContext = None, sorter=None):
context = simpleTALES.Context()
context.addGlobal (self.name, self.contextFactory.getContextMap (sorter))
if (additionalContext is not None):
context.addGlobal (additionalContext[0], additionalContext[1])
if (self.template is None):
templateFile = open (templateName, 'r')
self.template = simpleTAL.compileHTMLTemplate (templateFile)
templateFile.close()
outputFile = open (outputName, 'w')
self.template.expand (context, outputFile)
outputFile.close()
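# A minimal usage sketch (hypothetical file names; assumes the same Python 2
# environment with ASV and simpletal that this module targets):
#
#   expander = CSVTemplateExpander ("people.csv", name="csvList")
#   sorter = ColumnSorter ([("Surname", None)])
#   expander.expandTemplate ("row-template.html", "people.html", sorter=sorter)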
| 2.828125 | 3 |
wagtail/admin/forms/comments.py | stephiescastle/wagtail | 0 | 11154 | <reponame>stephiescastle/wagtail<gh_stars>0
from django.forms import BooleanField, ValidationError
from django.utils.timezone import now
from django.utils.translation import gettext as _
from .models import WagtailAdminModelForm
class CommentReplyForm(WagtailAdminModelForm):
class Meta:
fields = ("text",)
def clean(self):
cleaned_data = super().clean()
user = self.for_user
if not self.instance.pk:
self.instance.user = user
elif self.instance.user != user:
# trying to edit someone else's comment reply
if any(field for field in self.changed_data):
# includes DELETION_FIELD_NAME, as users cannot delete each other's individual comment replies
# if deleting a whole thread, this should be done by deleting the parent Comment instead
self.add_error(
None, ValidationError(_("You cannot edit another user's comment."))
)
return cleaned_data
class CommentForm(WagtailAdminModelForm):
"""
This is designed to be subclassed and have the user overridden to enable user-based validation within the edit handler system
"""
resolved = BooleanField(required=False)
class Meta:
formsets = {
"replies": {
"form": CommentReplyForm,
"inherit_kwargs": ["for_user"],
}
}
def clean(self):
cleaned_data = super().clean()
user = self.for_user
if not self.instance.pk:
self.instance.user = user
elif self.instance.user != user:
# trying to edit someone else's comment
if any(
field
for field in self.changed_data
if field not in ["resolved", "position"]
):
# users can resolve each other's base comments and change their positions within a field
self.add_error(
None, ValidationError(_("You cannot edit another user's comment."))
)
return cleaned_data
def save(self, *args, **kwargs):
if self.cleaned_data.get("resolved", False):
if not getattr(self.instance, "resolved_at"):
self.instance.resolved_at = now()
self.instance.resolved_by = self.for_user
else:
self.instance.resolved_by = None
self.instance.resolved_at = None
return super().save(*args, **kwargs)
| 2.21875 | 2 |
run_db_data.py | MahirMahbub/email-client | 0 | 11155 | <filename>run_db_data.py
import os
from sqlalchemy.orm import Session
from db.database import SessionLocal
class DbData:
def __init__(self):
self.root_directory: str = "db_merge_scripts"
self.scripts = [
"loader.sql"
]
def sync(self, db: Session):
for script in self.scripts:
try:
directory = os.path.join(self.root_directory, script)
print(directory)
sql = open(directory, "r").read()
db.execute(sql)
db.commit()
                print(green("Data file processed: " + directory))
            except Exception as e:
                print(red("Error processing data file: " + directory))
print(e)
def colored(text, r, g, b):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def red(text):
return colored(text, 255, 0, 0)
def green(text):
    return colored(text, 0, 255, 0)
def add_master_data():
db = SessionLocal()
DbData().sync(db)
db.close()
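# Typical entry point for this script (an assumption; the module itself only
# defines add_master_data and never calls it):
#
#   if __name__ == "__main__":
#       add_master_data()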
| 2.65625 | 3 |
scobra/analysis/compare_elements.py | nihalzp/scobra | 7 | 11156 |
def compareMetaboliteDicts(d1, d2):
sorted_d1_keys = sorted(d1.keys())
sorted_d2_keys = sorted(d2.keys())
for i in range(len(sorted_d1_keys)):
if not compareMetabolites(sorted_d1_keys[i], sorted_d2_keys[i], naive=True):
return False
elif not d1[sorted_d1_keys[i]] == d2[sorted_d2_keys[i]]:
return False
else:
return True
def compareMetabolites(met1, met2, naive=False):
if isinstance(met1, set):
        return compareMetabolites(list(met1), list(met2), naive)
if isinstance(met1, list):
if not isinstance(met2, list):
return False
elif len(met1) != len(met2):
return False
else:
for i in range(len(met1)):
if not compareMetabolites(met1[i], met2[i], naive):
return False
else:
return True
else:
if not True:
#can never be entered
pass
elif not met1._bound == met2._bound:
return False
elif not met1._constraint_sense == met2._constraint_sense:
return False
#elif not met1.annotation == met2.annotation:
# return False
elif not met1.charge == met2.charge:
return False
elif not met1.compartment == met2.compartment:
return False
elif not met1.name == met2.name:
return False
elif not met1.compartment == met2.compartment:
return False
#elif not met1.notes == met2.notes:
# return False
elif not naive:
if not compareReactions(met1._reaction, met2._reaction, naive=True):
return False
elif not compareModels(met1._model, met2._model, naive=True):
return False
else:
return True
else:
return True
def compareReactions(r1, r2, naive=False):
if isinstance(r1, set):
return compareReactions(list(r1), list(r2), naive)
if isinstance(r1, list):
if not isinstance(r2, list):
return False
elif len(r1) != len(r2):
return False
else:
for i in range(len(r1)):
if not compareReactions(r1[i], r2[i],naive):
return False
else:
return True
else:
if not True:
#can never be entered
pass
#elif not r1._compartments == r2._compartments:
# return False
#elif not r1._forward_variable == r2._forward_variable:
# return False
elif not r1._gene_reaction_rule == r2._gene_reaction_rule:
return False
elif not r1._id == r2._id:
return False
elif not r1._lower_bound == r2._lower_bound:
return False
#elif not r1._model == r2._model:
# return False
#elif not r1._reverse_variable == r2._reverse_variable:
# return False
elif not r1._upper_bound == r2._upper_bound:
return False
#elif not r1.annotation == r2.annotation:
# return False
elif not r1.name== r2.name:
return False
#elif not r1.notes == r2.notes:
# return False
elif not r1.subsystem == r2.subsystem:
return False
elif not r1.variable_kind == r2.variable_kind:
return False
elif not naive:
if not compareMetaboliteDicts(r1._metabolites, r2._metabolites):
return False
elif not compareGenes(r1._genes,r2._genes, naive=True):
return False
else:
return True
else:
return True
def compareGenes(g1, g2, naive=False):
if isinstance(g1, set):
return compareGenes(list(g1), list(g2), naive)
if isinstance(g1, list):
if not isinstance(g2, list):
return False
elif len(g1) != len(g2):
return False
else:
for i in range(len(g1)):
if not compareGenes(g1[i], g2[i], naive):
return False
else:
return True
else:
if not True:
#can never be entered
pass
elif not g1._functional == g2._functional:
return False
elif not g1._id == g2._id:
return False
#elif not g1._model == g2._model:
# return False
elif not g1.annotation == g2.annotation:
return False
elif not g1.name == g2.name:
return False
#elif not g1.notes == g2.notes:
# return False
elif not naive:
if not compareReactions(g1._reaction,g2._reaction, naive=True):
return False
else:
return True
else:
return True
def compareModels(m1, m2, naive=False):
if not True:
#can never be entered
pass
#elif not m1._compartments == m2._compartments:
# return False
#elif not m1._contexts == m2._contexts:
# return False
#elif not m1._solver == m2._solver:
# return False
elif not m1._id == m2._id:
return False
#elif not m1._trimmed == m2.trimmed:
# return False
#elif not m1._trimmed_genes == m2._trimmed_genes:
# return False
#elif not m1._trimmed_reactions == m2._trimmed_reactions:
# return False
#elif not m1.annotation == m2.annotation:
# return False
elif not m1.bounds == m2.bounds:
return False
elif not m1.name == m2.name:
return False
#elif not m1.notes == m2.notes:
# return False
#elif not m1.quadratic_component == m2.quadratic_component:
# return False
elif not naive:
if not compareGenes(m1.genes, m2.genes):
return False
elif not compareMetabolites(m1.metabolites, m2.metabolites):
return False
elif not compareReactions(m1.reactions,m2.reactions):
return False
else:
return True
else:
return True
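# Usage sketch (assumes two cobra/scobra model objects loaded elsewhere);
# compareModels recursively walks genes, metabolites and reactions:
#
#   identical = compareModels(model_a, model_b)
#   print("models identical:", identical)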
| 2.609375 | 3 |
five/five_copy.py | ngd-b/python-demo | 1 | 11157 | <filename>five/five_copy.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding:utf-8 -*-
print("hello world")
f = None
try:
f = open("./hello.txt","r",encoding="utf8")
print(f.read(5),end='')
print(f.read(5),end='')
print(f.read(5))
except IOError as e:
print(e)
finally:
if f:
f.close()
# the with statement closes the file automatically
with open("./hello.txt","r",encoding="utf8") as f:
print(f.read())
# readlines() reads the file line by line
with open("./hello.txt","r",encoding="utf8") as f:
for line in f.readlines():
print(line.strip())
# write data to files
with open("./hello_1.txt","w",encoding="utf8") as f:
f.write("北京欢迎你!")
with open("./hello.txt","a",encoding="utf8") as f:
f.write("祖国 70!")
# StringIO / BytesIO
from io import StringIO
# create an in-memory text stream
str = StringIO('init')
# read back the value used for initialization
while True:
s = str.readline()
if s == '':
break
print(s.strip())
# write more text into the stream
str.write("你好!")
str.write(" 南京")
# get the full contents
print(str.getvalue())
'''
while True:
s = str.readline()
if s == '':
break
print(s.strip())
'''
# write binary data
from io import BytesIO
bi = BytesIO()
bi.write("你好".encode("utf-8"))
print(bi.getvalue())
by = BytesIO(b'\xe4\xbd\xa0\xe5\xa5\xbd')
print(by.read())
# operating system / file system helpers (os module)
import os
# platform name of the current environment, e.g. 'nt' on Windows
print(os.name)
# path module in use, e.g. <module 'ntpath' from 'G:\\python-3.7\\lib\\ntpath.py'>
print(os.path)
# environment variables (system and user)
print(os.environ)
# name of the user logged in on this console, e.g. 'bobol'
print(os.getlogin())
# create a directory
os.mkdir("./foo/")
# remove a directory
os.rmdir("./foo/")
'''
os.path handles most path-related questions
'''
# absolute path of the given path, e.g. 'G:\\pythonDemo\\python-demo\\five'
print(os.path.abspath("./"))
# whether the given path exists on the file system, e.g. False
print(os.path.exists("./foo"))
# size of the entry at the given path, e.g. 4096
print(os.path.getsize("../"))
# whether the given path is absolute, e.g. False
print(os.path.isabs("../"))
| 3.546875 | 4 |
prepare_sets.py | mechtal/Vaccination_UK | 0 | 11158 | from sklearn.model_selection import train_test_split

def prepare_sets(dataset, feature_columns, y_column):
train_X, val_X, train_y, val_y = train_test_split(dataset[feature_columns], dataset[y_column], random_state=1)
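    # Usage sketch (hypothetical column names; `df` is assumed to be a pandas
    # DataFrame loaded elsewhere):
    #   train_X, val_X, train_y, val_y = prepare_sets(df, ["age", "region"], "vaccinated")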
return train_X, val_X, train_y, val_y | 2.421875 | 2 |
examples/batch_ts_insert.py | bureau14/qdb-api-python | 9 | 11159 | # Copyright (c) 2009-2020, quasardb SAS. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of quasardb nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY QUASARDB AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from builtins import range as xrange, int
import os
from socket import gethostname
import sys
import inspect
import traceback
import random
import time
import datetime
import locale
import numpy as np
import quasardb
STOCK_COLUMN = "stock_id"
OPEN_COLUMN = "open"
CLOSE_COLUMN = "close"
HIGH_COLUMN = "high"
LOW_COLUMN = "low"
VOLUME_COLUMN = "volume"
def time_execution(str, f, *args):
print(" - ", str, end='')
start_time = time.time()
res = f(*args)
end_time = time.time()
print(" [duration: {}s]".format(end_time - start_time))
return res
def gen_ts_name():
return "test.{}.{}.{}".format(gethostname(), os.getpid(), random.randint(0, 100000))
def create_ts(q, name):
ts = q.ts(name)
ts.create([quasardb.ColumnInfo(quasardb.ColumnType.Int64, STOCK_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, OPEN_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, CLOSE_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, HIGH_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Double, LOW_COLUMN),
quasardb.ColumnInfo(quasardb.ColumnType.Int64, VOLUME_COLUMN)])
return ts
def create_many_ts(q, names):
return [create_ts(q, x) for x in names]
def generate_prices(price_count):
return np.random.uniform(-100.0, 100.0, price_count)
def generate_points(points_count):
start_time = np.datetime64('2017-01-01', 'ns')
dates = np.array([(start_time + np.timedelta64(i, 'm')) for i in range(points_count)]).astype('datetime64[ns]')
stock_ids = np.random.randint(1, 25, size=points_count)
prices = np.array([generate_prices(60) for i in range(points_count)]).astype('double')
volumes = np.random.randint(0, 10000, points_count)
return (dates, stock_ids, prices, volumes)
def batch_ts_columns(ts_name, prealloc_size):
return (quasardb.BatchColumnInfo(ts_name, STOCK_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, OPEN_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, CLOSE_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, HIGH_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, LOW_COLUMN, prealloc_size),
quasardb.BatchColumnInfo(ts_name, VOLUME_COLUMN, prealloc_size))
def calculate_minute_bar(prices):
# Takes all prices for a single minute, and calculate OHLC
return (prices[0], prices[-1], np.amax(prices), np.amin(prices))
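# For example, calculate_minute_bar(np.array([10.0, 12.0, 9.0, 11.0])) would
# return (10.0, 11.0, 12.0, 9.0), i.e. (open, close, high, low).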
def bulk_insert(q, ts_names, dates, stock_ids, prices, volumes):
# We generate a flattened list of columns for each timeseries; for example,
# for 2 columns for 4 timeseries each, we have 8 columns.
columns = [column for nested in (batch_ts_columns(ts_name, len(dates))
for ts_name in ts_names)
for column in nested]
batch_inserter = q.ts_batch(columns)
for i in range(len(stock_ids)):
# We use the known layout of column (2 for each timeseries, alternating with
# STOCK_COLUMN and PRICE_COLUMN) to set the values.
for j in range(0, len(ts_names) * 6, 6):
(o, c, h, l) = calculate_minute_bar(prices[i])
batch_inserter.start_row(dates[i])
batch_inserter.set_int64(j, stock_ids[i]) # set stock_id
batch_inserter.set_double(j + 1, o) # open
batch_inserter.set_double(j + 2, c) # close
batch_inserter.set_double(j + 3, h) # high
batch_inserter.set_double(j + 4, l) # low
            batch_inserter.set_int64(j + 5, volumes[i]) # volume
batch_inserter.push()
def make_it_so(q, points_count):
ts_names = [gen_ts_name(), gen_ts_name()]
ts = time_execution("Creating a time series with names {}".format(ts_names), create_many_ts, q, ts_names)
(dates, stock_ids, prices, volumes) = time_execution("Generating {:,} points".format(points_count), generate_points, points_count)
time_execution("Inserting {:,} points into timeseries with names {}".format(points_count, ts_names), bulk_insert, q, ts_names, dates, stock_ids, prices, volumes)
return (ts_names, dates, np.unique(stock_ids))
def main(quasardb_uri, points_count):
print("Connecting to: ", quasardb_uri)
q = quasardb.Cluster(uri=quasardb_uri)
print(" *** Inserting {:,} into {}".format(points_count, quasardb_uri))
make_it_so(q, points_count)
if __name__ == "__main__":
try:
if len(sys.argv) != 3:
print("usage: ", sys.argv[0], " quasardb_uri points_count")
sys.exit(1)
main(sys.argv[1], int(sys.argv[2]))
except Exception as ex: # pylint: disable=W0703
print("An error ocurred:", str(ex))
traceback.print_exc()
| 1.539063 | 2 |
tests/backends/test_flashtext_backend.py | openredact/pii-identifier | 14 | 11160 | from nerwhal.backends.flashtext_backend import FlashtextBackend
from nerwhal.recognizer_bases import FlashtextRecognizer
def test_single_recognizer(embed):
class TestRecognizer(FlashtextRecognizer):
TAG = "XX"
SCORE = 1.0
@property
def keywords(self):
return ["abc", "cde"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizer)
text = "Das ist abc und cde."
ents = backend.run(text)
assert embed(text, ents) == "Das ist XX und XX."
assert ents[0].start_char == 8
assert ents[0].end_char == 11
assert ents[0].tag == "XX"
assert ents[0].text == "abc"
assert ents[0].score == 1.0
assert ents[0].recognizer == "TestRecognizer"
def test_multiple_recognizers(embed):
class TestRecognizerA(FlashtextRecognizer):
TAG = "A"
SCORE = 1.0
@property
def keywords(self):
return ["abc"]
class TestRecognizerB(FlashtextRecognizer):
TAG = "B"
SCORE = 0.5
@property
def keywords(self):
return ["cde"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizerA)
backend.register_recognizer(TestRecognizerB)
text = "Das ist abc und cde."
ents = backend.run(text)
assert embed(text, ents) == "Das ist A und B."
assert ents[0].tag == "A"
assert ents[0].score == 1.0
assert ents[1].tag == "B"
assert ents[1].score == 0.5
def test_overlapping_recognizers(embed):
class TestRecognizerA(FlashtextRecognizer):
TAG = "A"
SCORE = 1.0
@property
def keywords(self):
return ["abc", "cde"]
class TestRecognizerB(FlashtextRecognizer):
TAG = "B"
SCORE = 0.5
@property
def keywords(self):
return ["cde", "fgh"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizerA)
backend.register_recognizer(TestRecognizerB)
text = "Das ist cde."
ents = backend.run(text)
# Recognizer B overwrites the keyword "cde"
assert embed(text, ents) == "Das ist B."
| 2.515625 | 3 |
new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 30 | 11161 | import boto3
import sys
import time
import logging
import getpass
def new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername):
masterPass = getpass.getpass('DBMasterPassword: ')
    if len(masterPass) < 10:
        logging.warning('Password is not at least 10 characters. Please try again')
        time.sleep(5)
        sys.exit(1)
try:
rds_instance = boto3.client('rds')
create_instance = rds_instance.create_db_instance(
DBName = dbname,
DBInstanceIdentifier = instanceID,
AllocatedStorage = int(storage),
DBInstanceClass = dbInstancetype,
Engine = 'mysql',
MasterUsername = dbusername,
            MasterUserPassword = str(masterPass),
MultiAZ = True,
EngineVersion = '5.7.23',
AutoMinorVersionUpgrade = False,
LicenseModel = 'general-public-license',
PubliclyAccessible = False,
Tags = [
{
'Key': 'Name',
'Value' : dbname
}
]
)
print(create_instance)
except Exception as e:
logging.warning('An error has occured')
print(e)
dbname = sys.argv[1]
instanceID = sys.argv[2]
storage = sys.argv[3]
dbInstancetype = sys.argv[4]
dbusername = sys.argv[5]
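# Example invocation (illustrative values; assumes AWS credentials and a
# default region are already configured for boto3):
#
#   python new_rdsmysql.py mydb mydb-instance 20 db.t3.medium dbadmin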
new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername) | 2.578125 | 3 |
src/tzscan/tzscan_block_api.py | Twente-Mining/tezos-reward-distributor | 0 | 11162 | <gh_stars>0
import random
import requests
from api.block_api import BlockApi
from exception.tzscan import TzScanException
from log_config import main_logger
logger = main_logger
HEAD_API = {'MAINNET': {'HEAD_API_URL': 'https://api%MIRROR%.tzscan.io/v2/head'},
'ALPHANET': {'HEAD_API_URL': 'http://api.alphanet.tzscan.io/v2/head'},
'ZERONET': {'HEAD_API_URL': 'http://api.zeronet.tzscan.io/v2/head'}
}
REVELATION_API = {'MAINNET': {'HEAD_API_URL': 'https://api%MIRROR%.tzscan.io/v1/operations/%PKH%?type=Reveal'},
'ALPHANET': {'HEAD_API_URL': 'https://api.alphanet.tzscan.io/v1/operations/%PKH%?type=Reveal'},
'ZERONET': {'HEAD_API_URL': 'https://api.zeronet.tzscan.io/v1/operations/%PKH%?type=Reveal'}
}
class TzScanBlockApiImpl(BlockApi):
def __init__(self, nw):
super(TzScanBlockApiImpl, self).__init__(nw)
self.head_api = HEAD_API[nw['NAME']]
if self.head_api is None:
raise Exception("Unknown network {}".format(nw))
self.revelation_api = REVELATION_API[nw['NAME']]
def get_current_level(self, verbose=False):
uri = self.head_api['HEAD_API_URL'].replace("%MIRROR%", str(self.rand_mirror()))
if verbose:
logger.debug("Requesting {}".format(uri))
resp = requests.get(uri)
if resp.status_code != 200:
# This means something went wrong.
raise TzScanException('GET {} {}'.format(uri, resp.status_code))
root = resp.json()
if verbose:
logger.debug("Response from tzscan is: {}".format(root))
current_level = int(root["level"])
return current_level
def get_revelation(self, pkh, verbose=False):
uri = self.revelation_api['HEAD_API_URL'].replace("%MIRROR%", str(self.rand_mirror())).replace("%PKH%", pkh)
if verbose:
logger.debug("Requesting {}".format(uri))
resp = requests.get(uri)
if resp.status_code != 200:
# This means something went wrong.
raise TzScanException('GET {} {}'.format(uri, resp.status_code))
root = resp.json()
if verbose:
logger.debug("Response from tzscan is: {}".format(root))
return len(root) > 0
def rand_mirror(self):
mirror = random.randint(1, 6)
if mirror == 4: # has problem lately
mirror = 3
return mirror
def test_get_revelation():
address_api = TzScanBlockApiImpl({"NAME":"ALPHANET"})
address_api.get_revelation("tz3WXYtyDUNL91qfiCJtVUX746QpNv5i5ve5")
if __name__ == '__main__':
test_get_revelation() | 2.3125 | 2 |
python/cuxfilter/tests/charts/core/test_core_non_aggregate.py | Anhmike/cuxfilter | 201 | 11163 | import pytest
import cudf
import mock
from cuxfilter.charts.core.non_aggregate.core_non_aggregate import (
BaseNonAggregate,
)
from cuxfilter.dashboard import DashBoard
from cuxfilter import DataFrame
from cuxfilter.layouts import chart_view
class TestCoreNonAggregateChart:
def test_variables(self):
bnac = BaseNonAggregate()
# BaseChart variables
assert bnac.chart_type is None
assert bnac.x is None
assert bnac.y is None
assert bnac.aggregate_fn == "count"
assert bnac.color is None
assert bnac.height == 0
assert bnac.width == 0
assert bnac.add_interaction is True
assert bnac.chart is None
assert bnac.source is None
assert bnac.source_backup is None
assert bnac.data_points == 0
assert bnac._library_specific_params == {}
assert bnac.stride is None
assert bnac.stride_type == int
assert bnac.min_value == 0.0
assert bnac.max_value == 0.0
assert bnac.x_label_map == {}
assert bnac.y_label_map == {}
assert bnac.title == ""
# test chart name setter
bnac.x = "x"
bnac.y = "y"
bnac.chart_type = "test_chart_type"
assert bnac.name == "x_y_count_test_chart_type_"
# BaseNonAggregateChart variables
assert bnac.use_data_tiles is False
assert bnac.reset_event is None
assert bnac.x_range is None
assert bnac.y_range is None
assert bnac.aggregate_col is None
def test_label_mappers(self):
bnac = BaseNonAggregate()
library_specific_params = {
"x_label_map": {"a": 1, "b": 2},
"y_label_map": {"a": 1, "b": 2},
}
bnac.library_specific_params = library_specific_params
assert bnac.x_label_map == {"a": 1, "b": 2}
assert bnac.y_label_map == {"a": 1, "b": 2}
@pytest.mark.parametrize("chart, _chart", [(None, None), (1, 1)])
def test_view(self, chart, _chart):
bnac = BaseNonAggregate()
bnac.chart = chart
bnac.width = 400
bnac.title = "test_title"
assert str(bnac.view()) == str(
chart_view(_chart, width=bnac.width, title=bnac.title)
)
def test_get_selection_geometry_callback(self):
bnac = BaseNonAggregate()
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
assert (
bnac.get_selection_geometry_callback(dashboard).__name__
== "selection_callback"
)
assert callable(type(bnac.get_selection_geometry_callback(dashboard)))
def test_box_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
class evt:
geometry = dict(x0=1, x1=2, y0=3, y1=4, type="rect")
t = bnac.get_selection_geometry_callback(dashboard)
t(evt)
assert self.result.equals(df.query("1<=a<=2 and 3<=b<=4"))
    def test_lasso_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
class evt:
geometry = dict(x=[1, 1, 2], y=[1, 2, 1], type="poly")
final = True
t = bnac.get_selection_geometry_callback(dashboard)
with mock.patch("cuspatial.point_in_polygon") as pip:
pip.return_value = cudf.DataFrame(
{"selection": [True, False, True]}
)
t(evt)
assert pip.called
@pytest.mark.parametrize(
"data, _data",
[
(cudf.DataFrame(), cudf.DataFrame()),
(
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
),
],
)
def test_calculate_source(self, data, _data):
"""
Calculate source just calls to the format_source_data function
which is implemented by chart types inheriting this class.
"""
bnac = BaseNonAggregate()
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.format_source_data = t_function
bnac.calculate_source(data)
assert self.result.equals(_data)
@pytest.mark.parametrize(
"x_range, y_range, query, local_dict",
[
(
(1, 2),
(3, 4),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 1, "x_max": 2, "y_min": 3, "y_max": 4},
),
(
(0, 2),
(3, 5),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 0, "x_max": 2, "y_min": 3, "y_max": 5},
),
],
)
def test_compute_query_dict(self, x_range, y_range, query, local_dict):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "x"
bnac.y = "y"
bnac.x_range = x_range
bnac.y_range = y_range
df = cudf.DataFrame({"x": [1, 2, 2], "y": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
bnac.compute_query_dict(
dashboard._query_str_dict, dashboard._query_local_variables_dict
)
bnac_key = (
f"{bnac.x}_{bnac.y}"
f"{'_' + bnac.aggregate_col if bnac.aggregate_col else ''}"
f"_{bnac.aggregate_fn}_{bnac.chart_type}_{bnac.title}"
)
assert dashboard._query_str_dict[bnac_key] == query
for key in local_dict:
assert (
dashboard._query_local_variables_dict[key] == local_dict[key]
)
@pytest.mark.parametrize(
"add_interaction, reset_event, event_1, event_2",
[
(True, None, "selection_callback", None),
(True, "test_event", "selection_callback", "reset_callback"),
(False, "test_event", None, "reset_callback"),
],
)
def test_add_events(self, add_interaction, reset_event, event_1, event_2):
bnac = BaseNonAggregate()
bnac.add_interaction = add_interaction
bnac.reset_event = reset_event
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
self.event_1 = None
self.event_2 = None
def t_func(fn):
self.event_1 = fn.__name__
def t_func1(event, fn):
self.event_2 = fn.__name__
bnac.add_selection_geometry_event = t_func
bnac.add_event = t_func1
bnac.add_events(dashboard)
assert self.event_1 == event_1
assert self.event_2 == event_2
def test_add_reset_event(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac.x_range = (0, 2)
bnac.y_range = (3, 5)
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
def t_func1(event, fn):
fn("event")
bnac.add_event = t_func1
bnac.add_reset_event(dashboard)
assert bnac.x_range is None
assert bnac.y_range is None
def test_query_chart_by_range(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
query_tuple = (4, 5)
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# creating a dummy reload chart fn as its not implemented in core
# non aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_range(
active_chart=bnac_1, query_tuple=query_tuple, datatile=None
)
assert self.result.to_string() == " a b\n1 2 4\n2 3 5"
assert self.patch_update is False
@pytest.mark.parametrize(
"new_indices, result",
[
([4, 5], " a b\n1 2 4\n2 3 5"),
([], " a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"),
([3], " a b\n0 1 3"),
],
)
def test_query_chart_by_indices(self, new_indices, result):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
new_indices = new_indices
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# creating a dummy reload chart fn as its not implemented in core
# non aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_indices(
active_chart=bnac_1,
old_indices=[],
new_indices=new_indices,
datatile=None,
)
assert self.result.to_string() == result
assert self.patch_update is False
| 2.078125 | 2 |
tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | 0 | 11164 | <gh_stars>0
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table(u'tagging_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True, blank=True)),
))
db.send_create_signal(u'tagging', ['Tag'])
# Adding unique constraint on 'Tag', fields ['title', 'site']
db.create_unique(u'tagging_tag', ['title', 'site_id'])
# Adding model 'ContentObjectTag'
db.create_table(u'tagging_contentobjecttag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_contentobjecttag', to=orm['contenttypes.ContentType'])),
('object_pk', self.gf('django.db.models.fields.PositiveIntegerField')()),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_object_tags', to=orm['tagging.Tag'])),
))
db.send_create_signal(u'tagging', ['ContentObjectTag'])
def backwards(self, orm):
# Removing unique constraint on 'Tag', fields ['title', 'site']
db.delete_unique(u'tagging_tag', ['title', 'site_id'])
# Deleting model 'Tag'
db.delete_table(u'tagging_tag')
# Deleting model 'ContentObjectTag'
db.delete_table(u'tagging_contentobjecttag')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'tagging.contentobjecttag': {
'Meta': {'object_name': 'ContentObjectTag'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_contentobjecttag'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_object_tags'", 'to': u"orm['tagging.Tag']"})
},
u'tagging.tag': {
'Meta': {'unique_together': "[('title', 'site')]", 'object_name': 'Tag'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
}
}
complete_apps = ['tagging'] | 2.171875 | 2 |
recogym/envs/session.py | philomenec/reco-gym | 413 | 11165 | <reponame>philomenec/reco-gym
class Session(list):
"""Abstract Session class"""
def to_strings(self, user_id, session_id):
"""represent session as list of strings (one per event)"""
user_id, session_id = str(user_id), str(session_id)
session_type = self.get_type()
strings = []
for event, product in self:
columns = [user_id, session_type, session_id, event, str(product)]
strings.append(','.join(columns))
return strings
def get_type(self):
        raise NotImplementedError
class OrganicSessions(Session):
def __init__(self):
super(OrganicSessions, self).__init__()
def next(self, context, product):
self.append(
{
't': context.time(),
'u': context.user(),
'z': 'pageview',
'v': product
}
)
def get_type(self):
return 'organic'
    def get_views(self):
        return [h['v'] for h in self if h['z'] == 'pageview']
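# Usage sketch (the context object is assumed to expose time() and user(),
# as it does inside recogym environments):
#
#   session = OrganicSessions()
#   session.next(context, product=42)
#   session.get_views()   # -> [42]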
| 2.71875 | 3 |
message_handlers/location_handler.py | pratyushmore/lunch-tag-bot | 0 | 11166 | def location(messaging_adaptor, user, channel, location):
message = "Your location has been set to `{}`. You are ready to be matched for Lunch Tag :)".format(location)
messaging_adaptor.send_message(channel, message)
| 2.390625 | 2 |
b.py | lbarchive/b.py | 0 | 11167 | <gh_stars>0
#!/usr/bin/env python
# Copyright (C) 2013-2016 by <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
============
b.py command
============
Commands
========
============= =======================
command supported services
============= =======================
``blogs`` ``b``
``post`` ``b``, ``wp``
``generate`` ``base``, ``b``, ``wp``
``checklink`` ``base``, ``b``, ``wp``
``search`` ``b``
============= =======================
Descriptions:
``blogs``
list blogs. This can be used for blog IDs lookup.
``post``
post or update a blog post.
``generate``
generate HTML file at ``<TEMP>/draft.html``, where ``<TEMP>`` is the system's
temporary directory.
The generation can output a preview html at ``<TEMP>/preview.html`` if there
is ``tmpl.html``. It will replace ``%%Title%%`` with post title and
``%%Content%%`` with generated HTML.
``checklink``
check links in generated HTML using lnkckr_.
``search``
search blog
.. _lnkckr: https://pypi.python.org/pypi/lnkckr
"""
from __future__ import print_function
import argparse as ap
import codecs
import imp
import logging
import os
import sys
import traceback
from bpy.handlers import handlers
from bpy.services import find_service, services
__program__ = 'b.py'
__description__ = 'Post to Blogger or WordPress in markup language seamlessly'
__copyright__ = 'Copyright 2013-2016, <NAME>'
__license__ = 'MIT License'
__version__ = '0.11.0'
__website__ = 'http://bitbucket.org/livibetter/b.py'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
# b.py stuff
############
# filename of local configuration without '.py' suffix.
BRC = 'brc'
def parse_args():
p = ap.ArgumentParser()
p.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
p.add_argument('-d', '--debug', action='store_true',
help='turn on debugging messages')
p.add_argument('-s', '--service', default='base',
help='what service to use. (Default: %(default)s)')
sp = p.add_subparsers(help='commands')
pblogs = sp.add_parser('blogs', help='list blogs')
pblogs.set_defaults(subparser=pblogs, command='blogs')
psearch = sp.add_parser('search', help='search for posts')
psearch.add_argument('-b', '--blog', help='Blog ID')
psearch.add_argument('q', nargs='+', help='query text')
psearch.set_defaults(subparser=psearch, command='search')
pgen = sp.add_parser('generate', help='generate html')
pgen.add_argument('filename')
pgen.set_defaults(subparser=pgen, command='generate')
pchk = sp.add_parser('checklink', help='check links in chkerateed html')
pchk.add_argument('filename')
pchk.set_defaults(subparser=pchk, command='checklink')
ppost = sp.add_parser('post', help='post or update a blog post')
ppost.add_argument('filename')
ppost.set_defaults(subparser=ppost, command='post')
args = p.parse_args()
return args
def load_config():
rc = None
try:
search_path = [os.getcwd()]
_mod_data = imp.find_module(BRC, search_path)
print('Loading local configuration...')
try:
rc = imp.load_module(BRC, *_mod_data)
finally:
if _mod_data[0]:
_mod_data[0].close()
except ImportError:
pass
except Exception:
traceback.print_exc()
print('Error in %s, aborted.' % _mod_data[1])
sys.exit(1)
return rc
def main():
args = parse_args()
logging.basicConfig(
format=(
'%(asctime)s '
'%(levelname).4s '
'%(module)5.5s:%(funcName)-10.10s:%(lineno)04d '
'%(message)s'
),
datefmt='%H:%M:%S',
)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
encoding = sys.stdout.encoding
if not encoding.startswith('UTF'):
msg = (
'standard output encoding is %s, '
'try to set with UTF-8 if there is output issues.'
)
logging.warning(msg % encoding)
if sys.version_info.major == 2:
sys.stdout = codecs.getwriter(encoding)(sys.stdout, 'replace')
sys.stderr = codecs.getwriter(encoding)(sys.stderr, 'replace')
elif sys.version_info.major == 3:
sys.stdout = codecs.getwriter(encoding)(sys.stdout.buffer, 'replace')
sys.stderr = codecs.getwriter(encoding)(sys.stderr.buffer, 'replace')
rc = load_config()
service_options = {'blog': None}
if rc:
if hasattr(rc, 'handlers'):
for name, handler in rc.handlers.items():
if name in handlers:
handlers[name].update(handler)
else:
handlers[name] = handler.copy()
if hasattr(rc, 'services'):
for name, service in rc.services.items():
if name in services:
services[name].update(service)
else:
services[name] = service.copy()
if hasattr(rc, 'service'):
args.service = rc.service
if hasattr(rc, 'service_options'):
service_options.update(rc.service_options)
if hasattr(args, 'blog') and args.blog is not None:
service_options['blog'] = args.blog
filename = args.filename if hasattr(args, 'filename') else None
service = find_service(args.service, service_options, filename)
if args.command == 'blogs':
service.list_blogs()
elif args.command == 'search':
service.search(' '.join(args.q))
elif args.command == 'generate':
service.generate()
elif args.command == 'checklink':
service.checklink()
elif args.command == 'post':
service.post()
if __name__ == '__main__':
main()
| 1.359375 | 1 |
spyder/plugins/outlineexplorer/api.py | suokunlong/spyder | 1 | 11168 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Outline explorer API.
You need to declare a OutlineExplorerProxy, and a function for handle the
edit_goto Signal.
class OutlineExplorerProxyCustom(OutlineExplorerProxy):
...
def handle_go_to(name, line, text):
...
outlineexplorer = OutlineExplorerWidget(None)
oe_proxy = OutlineExplorerProxyCustom(name)
outlineexplorer.set_current_editor(oe_proxy, update=True, clear=False)
outlineexplorer.edit_goto.connect(handle_go_to)
"""
import re
from qtpy.QtCore import Signal, QObject
from qtpy.QtGui import QTextBlock
from spyder.config.base import _
from spyder.config.base import running_under_pytest
def document_cells(block, forward=True):
"""
Get cells oedata before or after block in the document.
Parameters
----------
forward : bool, optional
Whether to iterate forward or backward from the current block.
"""
if not block.isValid():
# Not a valid block
return
if forward:
block = block.next()
else:
block = block.previous()
while block.isValid():
data = block.userData()
if (data
and data.oedata
and data.oedata.def_type == OutlineExplorerData.CELL):
yield data.oedata
if forward:
block = block.next()
else:
block = block.previous()
def is_cell_header(block):
"""Check if the given block is a cell header."""
if not block.isValid():
return False
data = block.userData()
return (data
and data.oedata
and data.oedata.def_type == OutlineExplorerData.CELL)
def cell_index(block):
"""Get the cell index of the given block."""
index = len(list(document_cells(block, forward=False)))
if is_cell_header(block):
return index + 1
return index
def cell_name(block):
"""
Get the cell name the block is in.
If the cell is unnamed, return the cell index instead.
"""
if is_cell_header(block):
header = block.userData().oedata
else:
try:
header = next(document_cells(block, forward=False))
except StopIteration:
# This cell has no header, so it is the first cell.
return 0
if header.has_name():
return header.def_name
else:
# No name, return the index
return cell_index(block)
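# Minimal sketch of how the helpers above fit together (`block` is assumed to
# be a QTextBlock taken from the editor's QTextDocument):
#
#   idx = cell_index(block)    # 0-based position of the cell containing block
#   name = cell_name(block)    # the cell's name, or its index if unnamed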
class OutlineExplorerProxy(QObject):
"""
Proxy class between editors and OutlineExplorerWidget.
"""
sig_cursor_position_changed = Signal(int, int)
sig_outline_explorer_data_changed = Signal()
def __init__(self):
super(OutlineExplorerProxy, self).__init__()
self.fname = None
def is_python(self):
"""Return whether the editor is a python file or not."""
raise NotImplementedError
def get_id(self):
"""Return an unique id, used for identify objects in a dict"""
raise NotImplementedError
def give_focus(self):
"""Give focus to the editor, called when toogling visibility of
OutlineExplorerWidget."""
raise NotImplementedError
def get_line_count(self):
"""Return the number of lines of the editor (int)."""
raise NotImplementedError
def parent(self):
"""This is used for diferenciate editors in multi-window mode."""
return None
def get_cursor_line_number(self):
"""Return the cursor line number."""
raise NotImplementedError
def outlineexplorer_data_list(self):
"""Returns a list of outline explorer data."""
raise NotImplementedError
class OutlineExplorerData(QObject):
CLASS, FUNCTION, STATEMENT, COMMENT, CELL = list(range(5))
FUNCTION_TOKEN = 'def'
CLASS_TOKEN = 'class'
# Emitted if the OutlineExplorerData was changed
sig_update = Signal()
def __init__(self, block, text=None, fold_level=None, def_type=None,
def_name=None, color=None):
"""
Args:
text (str)
fold_level (int)
def_type (int): [CLASS, FUNCTION, STATEMENT, COMMENT, CELL]
def_name (str)
color (PyQt.QtGui.QTextCharFormat)
"""
super(OutlineExplorerData, self).__init__()
self.text = text
self.fold_level = fold_level
self.def_type = def_type
self.def_name = def_name
self.color = color
if running_under_pytest():
# block might be a dummy
self.block = block
else:
# Copy the text block to make sure it is not deleted
self.block = QTextBlock(block)
def is_not_class_nor_function(self):
return self.def_type not in (self.CLASS, self.FUNCTION)
def is_class_or_function(self):
return self.def_type in (self.CLASS, self.FUNCTION)
def is_comment(self):
return self.def_type in (self.COMMENT, self.CELL)
def get_class_name(self):
if self.def_type == self.CLASS:
return self.def_name
def get_function_name(self):
if self.def_type == self.FUNCTION:
return self.def_name
def get_token(self):
if self.def_type == self.FUNCTION:
token = self.FUNCTION_TOKEN
elif self.def_type == self.CLASS:
token = self.CLASS_TOKEN
return token
@property
def def_name(self):
"""Get the cell name."""
# Non cell don't need unique names.
if self.def_type != self.CELL:
return self._def_name
def get_name(oedata):
name = oedata._def_name
if not name:
name = _('Unnamed Cell')
return name
self_name = get_name(self)
existing_numbers = []
def check_match(oedata):
# Look for "string"
other_name = get_name(oedata)
pattern = '^' + re.escape(self_name) + r'(?:, #(\d+))?$'
match = re.match(pattern, other_name)
if match:
# Check if already has a number
number = match.groups()[0]
if number:
existing_numbers.append(int(number))
return True
return False
# Count cells
N_prev = 0
for oedata in document_cells(self.block, forward=False):
if check_match(oedata):
N_prev += 1
N_fix_previous = len(existing_numbers)
N_next = 0
for oedata in document_cells(self.block, forward=True):
if check_match(oedata):
N_next += 1
        # Get the remaining indexes we can use
free_indexes = [idx for idx in range(N_prev + N_next + 1)
if idx + 1 not in existing_numbers]
idx = free_indexes[N_prev - N_fix_previous]
if N_prev + N_next > 0:
return self_name + ', #{}'.format(idx + 1)
return self_name
@def_name.setter
def def_name(self, value):
"""Set name."""
self._def_name = value
def update(self, other):
"""Try to update to avoid reloading everything."""
if (self.def_type == other.def_type and
self.fold_level == other.fold_level):
self.text = other.text
old_def_name = self._def_name
self._def_name = other._def_name
self.color = other.color
self.sig_update.emit()
if self.def_type == self.CELL:
if self.cell_level != other.cell_level:
return False
# Must update all other cells whose name has changed.
for oedata in document_cells(self.block, forward=True):
if oedata._def_name in [self._def_name, old_def_name]:
oedata.sig_update.emit()
return True
return False
def is_valid(self):
"""Check if the oedata has a valid block attached."""
block = self.block
return (block
and block.isValid()
and block.userData()
and hasattr(block.userData(), 'oedata')
and block.userData().oedata == self
)
def has_name(self):
"""Check if cell has a name."""
if self._def_name:
return True
else:
return False
def get_block_number(self):
"""Get the block number."""
if not self.is_valid():
# Avoid calling blockNumber if not a valid block
return None
return self.block.blockNumber()
| 2.515625 | 3 |
seamless/core/__init__.py | sjdv1982/seamless | 15 | 11169 | <filename>seamless/core/__init__.py
import weakref
class IpyString(str):
def _repr_pretty_(self, p, cycle):
return p.text(str(self))
class SeamlessBase:
_destroyed = False
_context = None
_cached_path = None
name = None
def _get_macro(self):
return self._context()._macro
@property
def path(self):
if self._cached_path is not None:
return self._cached_path
if self._context is None:
return ()
elif self._context() is None:
return ("<None>", self.name)
elif self._context().path is None:
return ("<None>", self.name)
else:
return self._context().path + (self.name,)
def _validate_path(self, required_path=None):
if required_path is None:
required_path = self.path
else:
assert self.path == required_path, (self.path, required_path)
return required_path
def _set_context(self, context, name):
from .context import Context, UnboundContext
assert isinstance(context, (Context, UnboundContext))
if self._context is not None and self._context() is context:
assert self.name in context._auto
context._children.pop(self.name)
context._auto.discard(self.name)
self.name = name
return
if isinstance(context, UnboundContext):
assert self._context is None
else:
assert self._context is None or isinstance(self._context(), UnboundContext), self._context
ctx = weakref.ref(context)
self._context = ctx
self.name = name
return self
def _get_manager(self):
assert self._context is not None, self.name #worker/cell must have a context
assert self._context() is not None, self.name #worker/cell must have a context
return self._context()._get_manager()
def _root(self):
if self._context is None:
return None
if self._context() is None:
return None
return self._context()._root()
def _format_path(self):
path = self.path
if path is None:
ret = "<None>"
else:
path = [str(p) for p in path]
ret = "." + ".".join(path)
return ret
def __str__(self):
ret = "Seamless object: " + self._format_path()
return ret
def __repr__(self):
return self.__str__()
def _set_macro_object(self, macro_object):
self._macro_object = macro_object
@property
def self(self):
return self
def destroy(self, **kwargs):
self._destroyed = True
from .mount import mountmanager
from .macro_mode import get_macro_mode, macro_mode_on
from . import cell as cell_module
from .cell import Cell, cell
from . import context as context_module
from .context import Context, context
from .worker import Worker
from .transformer import Transformer, transformer
from .structured_cell import StructuredCell, Inchannel, Outchannel
from .macro import Macro, macro, path
from .reactor import Reactor, reactor
from .unilink import unilink | 2.203125 | 2 |
oguilem/configuration/config.py | dewberryants/oGUIlem | 2 | 11170 | import os
import re
import sys
from oguilem.configuration.fitness import OGUILEMFitnessFunctionConfiguration
from oguilem.configuration.ga import OGUILEMGlobOptConfig
from oguilem.configuration.geometry import OGUILEMGeometryConfig
from oguilem.configuration.utils import ConnectedValue, ConfigFileManager
from oguilem.resources import options
class OGUILEMConfig:
def __init__(self):
self.ui = OGUILEMUIConfig()
self.globopt = OGUILEMGlobOptConfig()
self.options = OGUILEMGeneralConfig()
self.geometry = OGUILEMGeometryConfig()
self.fitness = OGUILEMFitnessFunctionConfiguration()
self.file_manager = ConfigFileManager()
def save_to_file(self, path: str):
content = "###OGOLEM###\n"
content += self.globopt.get_finished_config()
content += self.geometry.get_finished_config(path)
content += self.fitness.get_finished_config()
content += self.options.get_finished_config()
with open(path, "w") as conf_file:
conf_file.write(content)
self.file_manager.signal_saved(path)
def load_from_file(self, path: str, preset=False):
self.options.set_to_default()
with open(path, "r") as conf_file:
content = conf_file.readlines()
# Find geometry block and split off
iter_content = iter(content)
geo_block = list()
backend_defs = list()
charge_block = list()
spin_block = list()
offset = 0
# Separate off blocks
for n, line in enumerate(iter_content):
# Charge and Spin Blocks
if line.strip().startswith("<CHARGES>"):
start = n + offset
try:
charge_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <CHARGES> tag!?")
while not charge_line.startswith("</CHARGES>"):
charge_block.append(charge_line)
try:
charge_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <GEOMETRY> tag in configuration!")
end = start + len(charge_block) + 2
content = content[:start] + content[end:]
offset -= 1
if line.strip().startswith("<SPINS>"):
start = n + offset
try:
spin_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <SPINS> tag!?")
while not spin_line.startswith("</SPINS>"):
spin_block.append(spin_line)
try:
spin_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <SPINS> tag in configuration!")
end = start + len(spin_block) + 2
content = content[:start] + content[end:]
offset -= 1
# Geometry Block
if line.strip().startswith("<GEOMETRY>"):
start = n + offset
try:
geo_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <GEOMETRY> tag!?")
while not geo_line.startswith("</GEOMETRY>"):
geo_block.append(geo_line)
try:
geo_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <GEOMETRY> tag in configuration!")
end = start + len(geo_block) + 2
content = content[:start] + content[end:]
offset -= 1
# Any Backend Definitions
if line.strip().startswith("<CLUSTERBACKEND>"):
back_block = list()
start = n + offset
try:
back_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Config ends after <CLUSTERBACKEND> tag!?")
while not back_line.startswith("</CLUSTERBACKEND>"):
back_block.append(back_line)
try:
back_line = next(iter_content).strip()
except StopIteration:
raise RuntimeError("Dangling <CLUSTERBACKEND> tag in configuration!")
end = start + len(back_block) + 2
backend_defs.append(back_block)
content = content[:start] + content[end:]
offset -= 1
# Parse them
self.geometry.parse_from_block(geo_block)
self.geometry.parse_charge_block(charge_block)
self.geometry.parse_spin_block(spin_block)
self.fitness.parse_backend_tags(backend_defs)
# Deal with the rest
for line in content:
if line.strip().startswith("LocOptAlgo="):
self.fitness.parse_locopt_algo(line.strip()[11:])
elif line.strip().startswith("GlobOptAlgo="):
self.globopt.parse_globopt_string(line.strip()[12:])
else:
for key in self.options.values:
type = self.options.values[key].type
if re.match(key + "=", line.strip()):
value, index = parse_value(line.strip()[len(key) + 1:], type)
if value is not None:
print("Option {:>30} set to: {:>30}".format(key, str(value)))
self.options.values[key].set(value, index)
else:
print("ERROR: Could not set Option %s. Set to default instead!" % key)
self.options.values[key].set(self.options.defaults[key])
if not preset:
self.file_manager.signal_saved(path)
else:
self.file_manager.signal_modification()
def parse_value(line, type):
value = None
index = -1
work = line.strip()
if type is str:
value = work
elif type is int:
value = int(work)
elif type is float:
value = float(work)
elif type is bool:
value = work.lower() == "true"
elif type is list:
tmp = work.split(";")
value = [float(tmp[0]), float(tmp[1]), float(tmp[2])]
return value, index
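# Examples of what parse_value returns (the "3;float" options arrive as a
# semicolon-separated triple, as set up in OGUILEMGeneralConfig below):
#
#   parse_value("true", bool)          # -> (True, -1)
#   parse_value("1.0;2.0;3.0", list)   # -> ([1.0, 2.0, 3.0], -1)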
class OGUILEMGeneralConfig:
def __init__(self):
self.defaults = dict()
self.values = dict()
for key in options:
type, default = options[key]
if type == "str":
self.defaults[key] = default
elif type == "int":
self.defaults[key] = int(default)
elif type == "float":
self.defaults[key] = float(default)
elif type == "bool":
self.defaults[key] = (default.lower() == "true")
elif type == "3;float":
default = default.strip().split(";")
self.defaults[key] = [float(default[0]), float(default[1]), float(default[2])]
else:
raise IOError("Could not parse xml key %s in general configs!" % key)
self.values[key] = ConnectedValue(self.defaults[key])
def set_to_default(self):
for key in options:
self.values[key].set(self.defaults[key])
def get_finished_config(self) -> str:
content = ""
for key in self.values:
self.values[key].request_update()
value = self.values[key].value
if value != self.defaults[key]:
content += "\n" + key + "=" + str(self.values[key])
return content
def find_config_folder():
    if sys.platform.startswith('win'):
path = os.path.join(os.environ['APPDATA'], 'oguilem')
else:
path = os.path.join(os.environ['HOME'], '.config', 'oguilem')
if not os.path.isdir(path):
os.mkdir(path)
return path
class OGUILEMUIConfig:
def __init__(self):
self.window_size = None
self.window_position = None
self.java_path = None
self.java_vm_variables = None
self.ogo_path = None
self.ogo_args = None
self.environmental_variables = None
self.recover_from_file()
def get_run_command(self, custom_run_command=""):
run_cmd = custom_run_command if custom_run_command else self.ogo_args
if not all([self.java_path, self.ogo_path, self.ogo_args]):
raise RuntimeError("Cannot run ogolem without knowing java and ogolem paths as well as ogolem arguments!")
if self.java_vm_variables:
return "%s %s -jar %s %s" % (self.java_path, self.java_vm_variables, self.ogo_path, run_cmd)
return "%s -jar %s %s" % (self.java_path, self.ogo_path, run_cmd)
def recover_from_file(self):
path = os.path.join(find_config_folder(), "oguilem.cfg")
try:
with open(path, "r") as config:
lines = config.readlines()
for line in lines:
work = line.strip()
if work.startswith("WINDOWSIZE"):
self.window_size = (int(work.split()[1]), int(work.split()[2]))
elif work.startswith("WINDOWPOS"):
self.window_position = (int(work.split()[1]), int(work.split()[2]))
elif work.startswith("JAVAPATH"):
self.java_path = work[8:].strip()
elif work.startswith("JAVAVM"):
self.java_vm_variables = work[6:].strip()
elif work.startswith("OGOPATH"):
self.ogo_path = work[7:].strip()
elif work.startswith("OGOARGS"):
self.ogo_args = work[7:].strip()
elif work.startswith("ENV"):
self.environmental_variables = work[3:].strip()
except ValueError:
print("There are format errors in the UI config file in '%s'. Using defaults." % find_config_folder())
except IOError:
print("Config file not found. A new one will generate once the program exits.")
def save_to_file(self):
path = os.path.join(find_config_folder(), "oguilem.cfg")
with open(path, "w") as config:
if self.window_size:
config.write("WINDOWSIZE %d %d\n" % (self.window_size[0], self.window_size[1]))
if self.window_position:
config.write("WINDOWPOS %d %d\n" % (self.window_position[0], self.window_position[1]))
if self.java_path:
config.write("JAVAPATH %s\n" % self.java_path)
if self.java_vm_variables:
config.write("JAVAVM %s\n" % self.java_vm_variables)
if self.ogo_path:
config.write("OGOPATH %s\n" % self.ogo_path)
if self.ogo_args:
config.write("OGOARGS %s\n" % self.ogo_args)
            if self.environmental_variables:
                config.write("ENV %s\n" % self.environmental_variables)
| 2.09375 | 2 |
xpd_workflow/temp_graph.py | CJ-Wright/xpd_workflow | 0 | 11171 | <filename>xpd_workflow/temp_graph.py
from __future__ import (division, print_function)
import matplotlib.cm as cmx
import matplotlib.colors as colors
from matplotlib import gridspec
from metadatastore.api import db_connect as mds_db_connect
from filestore.api import db_connect as fs_db_connect
fs_db_connect(
**{'database': 'data-processing-dev', 'host': 'localhost', 'port': 27017})
mds_db_connect(
**{'database': 'data-processing-dev', 'host': 'localhost', 'port': 27017})
from databroker import db, get_events
from datamuxer import DataMuxer
from sidewinder_spec.utils.handlers import *
import logging
from xpd_workflow.parsers import parse_xrd_standard
logger = logging.getLogger(__name__)
if __name__ == '__main__':
import os
import numpy as np
import matplotlib.pyplot as plt
save = True
lam = 1.54059
# Standard reflections for sample components
niox_hkl = ['111', '200', '220', '311', '222', '400', '331',
'420', '422', '511']
niox_tth = np.asarray(
[37.44, 43.47, 63.20, 75.37, 79.87, 95.58, 106.72, 111.84,
129.98, 148.68])
pr3_hkl = ['100', '001', '110', '101', '111', '200', '002', '210', '211',
'112', '202']
pr3_tth = np.asarray(
[22.96, 24.33, 32.70, 33.70, 41.18, 46.92, 49.86, 52.86, 59.00, 60.91,
70.87]
)
pr4_hkl = ['111', '113', '008', '117', '200', '119', '028', '0014', '220',
'131', '1115', '0214', '317', '31Na', '2214', '040', '400']
pr4_tth = np.asarray(
[23.43, 25.16, 25.86, 32.62, 33.36, 37.67, 42.19, 46.11, 47.44, 53.18,
55.55, 57.72, 59.10, 59.27, 68.25, 68.71, 70.00]
)
pr2_tth, pr2int, pr2_hkl = parse_xrd_standard(
'/mnt/bulk-data/research_data/Pr2NiO4orthorhombicPDF#97-008-1577.txt')
pr2_tth = pr2_tth[pr2int > 5.]
prox_hkl = ['111', '200', '220', '311', '222', '400', '331', '420', '422',
'511', '440', '531', '600']
prox_tth = np.asarray(
[28.25, 32.74, 46.99, 55.71, 58.43, 68.59, 75.73, 78.08, 87.27,
94.12, 105.63, 112.90, 115.42]
)
standard_names = [
# 'NiO',
'Pr3Ni2O7',
'Pr2NiO4',
# 'Pr4'
'Pr6O11'
]
master_hkl = [
# niox_hkl,
pr3_hkl,
pr2_hkl,
# pr4_hkl
prox_hkl
]
master_tth = [
# niox_tth,
pr3_tth,
pr2_tth,
# pr4_tth
prox_tth
]
color_map = [
# 'red',
'blue',
'black',
'red'
]
line_style = ['--', '-.', ':', ]
ns = [1, 2, 3, 4, 5,
# 18, 20, 22, 16, 28, 29, 27, 26
]
# ns = [26]
ns.sort()
#
for i in ns:
legended_hkl = []
print(i)
folder = '/mnt/bulk-data/research_data/USC_beamtime/APS_March_2016/S' + str(
i) + '/temp_exp'
hdr = db(run_folder=folder)[0]
dm = DataMuxer()
dm.append_events(get_events(hdr))
df = dm.to_sparse_dataframe()
print(df.keys())
binned = dm.bin_on('img', interpolation={'T': 'linear'})
# key_list = [f for f in os.listdir(folder) if
# f.endswith('.gr') and not f.startswith('d')]
key_list = [f for f in os.listdir(folder) if
f.endswith('.chi') and not f.startswith('d') and f.strip(
'0.chi') != '' and int(
f.lstrip('0').strip('.chi')) % 2 == 1]
key_list.sort()
key_list = key_list[:-1]
# key_list2.sort()
idxs = [int(os.path.splitext(f)[0]) for f in key_list]
Ts = binned['T'].values[idxs]
output = os.path.splitext(key_list[0])[-1][1:]
if key_list[0].endswith('.gr'):
offset = .1
skr = 0
else:
skr = 8
offset = .001
data_list = [(np.loadtxt(os.path.join(folder, f),
skiprows=skr
)[:, 0],
np.loadtxt(os.path.join(folder, f),
skiprows=skr
)[:, 1])
for f
in key_list]
ylim_min = None
for xmax, length in zip(
[len(data_list[0][0]) - 1, len(data_list[0][0]) - 1],
['short', 'full']):
fig = plt.figure(figsize=(26, 12))
gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
ax1 = plt.subplot(gs[0])
if length == 'short':
ax1.set_xlim(1.5, 4.5)
ax2 = plt.subplot(gs[1], sharey=ax1)
plt.setp(ax2.get_yticklabels(), visible=False)
cm = plt.get_cmap('viridis')
cNorm = colors.Normalize(vmin=0, vmax=len(key_list))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
for idx in range(len(key_list)):
xnm, y = data_list[idx]
colorVal = scalarMap.to_rgba(idx)
if output == 'chi':
x = xnm / 10.
ax1.plot(x[:xmax], y[:xmax] + idx * offset,
color=colorVal)
ax2.plot(Ts[idx], y[-1] + idx * offset, marker='o',
color=colorVal)
                    if ylim_min is None or ylim_min > np.min(
                            y[:xmax] + idx * offset):
                        ylim_min = np.min(y[:xmax] + idx * offset)
ax2.set_xticklabels([str(f) for f in ax2.get_xticks()],
rotation=90)
if output == 'gr':
bnds = ['O-Pr', 'O-Ni', 'Ni-Ni', 'Pr-Pr', 'Ni-Pr', 'O-Pr',
'O-Ni',
'Ni-Ni-Ni', 'Pr-Ni', 'Pr-Pr', 'Pr-Ni-O', 'Ni-Pr-Ni',
'Pr-Pr', 'Rs:Pr-Pr', 'Rs:Pr_Pr']
bnd_lens = [2.320, 1.955, 3.883, 3.765, 3.186, 2.771, 2.231,
7.767, 4.426, 6.649, 4.989, 5.404, 3.374, 3.910,
8.801]
# ax1.grid(True)
# ax2.grid(True)
for bnd, bnd_len in zip(bnds, bnd_lens):
ax1.axvline(bnd_len, color='grey', linestyle='--')
ax3 = ax1.twiny()
ax3.set_xticks(np.asarray(bnd_lens) / x[xmax])
ax3.set_xticklabels(bnds, rotation=90)
else:
std_axis = []
for n, hkls, tths, color, ls in zip(standard_names, master_hkl,
master_tth,
color_map, line_style):
std_axis.append(ax1.twiny())
ax3 = std_axis[-1]
hkl_q = np.pi * 4 * np.sin(np.deg2rad(tths / 2)) / lam
for k, (hkl, q) in enumerate(zip(hkls, hkl_q)):
if n not in legended_hkl:
ax1.axvline(q, color=color, linestyle=ls,
lw=2,
label=n
)
legended_hkl.append(n)
else:
ax1.axvline(q, color=color, linestyle=ls,
lw=2,
)
a = hkl_q > ax1.get_xlim()[0]
b = hkl_q < ax1.get_xlim()[1]
c = a & b
ax3.set_xticks(list((hkl_q[c] - ax1.get_xlim()[0]) / (
ax1.get_xlim()[1] - ax1.get_xlim()[0])
))
ax3.set_xticklabels(hkls, rotation=90, color=color)
ax2.set_xlabel('Temperature C')
if output == 'gr':
fig.suptitle('S{} PDF'.format(i))
ax1.set_xlabel(r"$r (\AA)$")
ax1.set_ylabel(r"$G (\AA^{-2})$")
elif output == 'chi':
fig.suptitle('S{} I(Q)'.format(i))
ax1.set_xlabel(r"$Q (\AA^{-1})$")
ax1.set_ylabel(r"$I (Q) $")
ax1.set_ylim(ylim_min)
ax1.legend()
gs.tight_layout(fig, rect=[0, 0, 1, .98], w_pad=1e-6)
if save:
fig.savefig(os.path.join('/mnt/bulk-data/Dropbox/',
'S{}_{}_output_{}.png'.format(
i, length, output)))
fig.savefig(os.path.join('/mnt/bulk-data/Dropbox/',
'S{}_{}_output_{}.eps'.format(
i, length, output)))
else:
plt.show()
| 2 | 2 |
winnow/core.py | bgschiller/winnow | 3 | 11172 | from __future__ import unicode_literals
import copy
import json
from six import string_types
from . import default_operators
from . import sql_prepare
from . import values
from .error import WinnowError
from .templating import SqlFragment
from .templating import WinnowSql
class Winnow(object):
"""
Winnow is a SQL query builder specifically designed for
powerful filtering on a table. It is designed to be
efficient and low-magic.
"""
# Take care here -- In order to avoid mucking up the parent's copy of this
# static value we have to deep copy it to every subclass.
_special_cases = {}
sql_class = WinnowSql
def __init__(self, table, sources):
self.table = table
self.sources = sources
self.sql = self.sql_class()
def prepare_query(self, *args, **kwargs):
"""
Proxy to self.sql
"""
return self.sql.prepare_query(*args, **kwargs)
def resolve(self, filt):
"""
        Given a filter, resolve (expand) all its clauses.
A resolved clause includes information about the
value type of the data source, and how to perform
queries against that data source.
return the modified filter.
"""
filt['logical_op'] = filt.get('logical_op', '&')
if filt['logical_op'] not in '&|':
raise WinnowError("Logical op must be one of &, |. Given: {}".format(
filt['logical_op']))
for ix in range(len(filt['filter_clauses'])):
filt['filter_clauses'][ix] = self.resolve_clause(
filt['filter_clauses'][ix])
return filt
def validate(self, filt):
"""
Make sure a filter is valid (resolves properly), but avoid bulking up
the json object (probably because it's about to go into the db, or
across the network)
"""
self.resolve(copy.deepcopy(filt))
return filt
def resolve_clause(self, filter_clause):
"""
Given a filter_clause, check that it's valid.
Return a dict-style filter_clause with a vivified
value field.
"""
if 'logical_op' in filter_clause:
# nested filter
return self.resolve(filter_clause)
ds, op = self.resolve_components(filter_clause)
value = self.vivify(op['value_type'], filter_clause['value'])
filter_clause['data_source_resolved'] = ds
filter_clause['operator_resolved'] = op
filter_clause['value_vivified'] = value
filter_clause['summary'] = self.summarize(filter_clause)
return filter_clause
def summarize(self, filter_clause):
ds = filter_clause['data_source_resolved']
op = filter_clause['operator_resolved']
value = filter_clause['value_vivified']
cvt = self.coalesce_value_type(op['value_type'])
value_string = value
operator_string = op.get('summary_template') or '{{data_source}} {} {{value}}'.format(op['name'])
if cvt == 'collection':
operator_string, value_string = self.summarize_collection(filter_clause)
elif cvt == 'relative_date':
value_string = value.replace('_', ' ')
elif cvt == 'numeric':
value_string = '{:,}'.format(value)
return operator_string.format(data_source=ds['display_name'], value=value_string)
@classmethod
def coalesce_value_type(cls, value_type):
for op in cls.operators:
if op['value_type'] == value_type:
return op.get('coalesced_value_type', value_type)
return value_type
@classmethod
def summarize_collection(cls, filter_clause):
value = filter_clause['value'] if isinstance(filter_clause['value'], list) else json.loads(filter_clause['value'])
operator_string = '{data_source} any of {value}' if len(value) != 1 else '{data_source} is {value}'
if not value:
value_string = '(none)'
else:
value_string = ', '.join(value)
return operator_string, value_string
@staticmethod
def empty_filter():
        return dict(logical_op='&', filter_clauses=[])
@classmethod
def vivify(cls, value_type, value):
"""De-stringify <value> into <value_type>
Raises WinnowError if <value> is not well formatted for that type."""
cvt = cls.coalesce_value_type(value_type)
if cvt == 'string':
return values.vivify_string(value)
elif cvt == 'collection':
return values.vivify_collection(value)
elif cvt in ('numeric', 'string_length'):
return values.vivify_numeric(value)
elif cvt == 'relative_date':
return values.vivify_relative_date(value)
elif cvt == 'absolute_date':
return values.vivify_absolute_date(value)
elif cvt in ('bool', 'nullable'):
return values.vivify_bool(value)
elif cvt == 'single_choice':
return values.vivify_single_choice(value)
else:
raise WinnowError("Unknown value_type, '{}'".format(value_type))
@classmethod
def stringify(cls, value_type, value):
cvt = cls.coalesce_value_type(value_type)
if isinstance(value, string_types):
value = cls.vivify(value_type, value)
if cvt == 'string':
return values.stringify_string(value)
elif cvt == 'collection':
return values.stringify_collection(value)
elif cvt in ('numeric', 'string_length'):
return values.stringify_numeric(value)
elif cvt == 'relative_date':
return values.stringify_relative_date(value)
elif cvt == 'absolute_date':
return values.stringify_absolute_date(value)
elif cvt in ('bool', 'nullable'):
return values.stringify_bool(value)
elif cvt == 'single_choice':
return values.stringify_single_choice(value)
raise WinnowError("Unknown value_type, '{}'".format(value_type))
operators = default_operators.OPERATORS
def resolve_operator(self, op_name, value_types):
'''Given an operator name, return an Op object.
Raise an error if the operator is not found'''
if not isinstance(op_name, string_types):
raise WinnowError("Bad operator type, '{}'. expected string".format(type(op_name)))
op_name = op_name.lower()
matches = [op for op in self.operators
if op['name'].lower() == op_name and op['value_type'] in value_types]
if len(matches) == 0:
raise WinnowError("Unknown operator '{}'".format(op_name))
return matches.pop()
def resolve_source(self, source_name):
"""
Given a source name, return a resolved data source.
Raise an error if the source name is not allowable
"""
matches = [source for source in self.sources
if source['display_name'] == source_name]
if len(matches) == 0:
raise WinnowError("Unknown data source '{}'".format(source_name))
elif len(matches) > 1:
raise WinnowError("Ambiguous data source '{}'".format(source_name))
return matches.pop()
def resolve_components(self, clause):
source = self.resolve_source(clause['data_source'])
operator = self.resolve_operator(clause['operator'],
source['value_types'])
return source, operator
def query(self, filt):
return self.prepare_query(
"SELECT * FROM {{ table | sqlsafe }} WHERE {{ condition }}",
table=self.table,
condition=self.where_clauses(filt))
def strip(self, filt):
"""
Perform the opposite of resolving a filter.
"""
for k in ('data_source_resolved', 'operator_resolved', 'value_vivified'):
filt.pop(k, None)
if 'filter_clauses' in filt:
filt['filter_clauses'] = [self.strip(f) for f in filt['filter_clauses']]
return filt
def where_clauses(self, filt):
'''
Apply a user filter.
Returns a paren-wrapped WHERE clause suitable for using
in a SELECT statement on the opportunity table.
'''
if not filt['filter_clauses']:
return True
filt = self.resolve(filt)
where_clauses = []
for clause in filt['filter_clauses']:
if 'logical_op' in clause:
# nested filter
where_clauses.append(self.where_clauses(clause))
elif 'data_source_resolved' in clause:
where_clauses.append(self._dispatch_clause(clause))
else:
# I don't expect to ever get here, because we should hit this
# issue when we call `filt = self.resolve(filt)`
raise WinnowError("Somehow, this is neither a nested filter, nor a resolved clause")
if not where_clauses:
return True
sep = '\nAND \n ' if filt['logical_op'] == '&' else '\nOR \n '
self.strip(filt)
sql_frag = SqlFragment.join(sep, where_clauses)
sql_frag.query = '(' + sql_frag.query + ')'
return sql_frag
def _dispatch_clause(self, clause):
"""
Evaluates whether a clause is standard, special, or custom
and calls the appropriate specialization function.
Each specialization returns a paren-wrapped WHERE clause, to be AND'd or OR'd
together to produce a final clause."""
for k in ('data_source_resolved', 'operator_resolved', 'value_vivified'):
if k not in clause:
raise WinnowError('failed to resolve component: {}'.format(k))
op = clause['operator_resolved']
special_handler = self.special_case_handler(
source_name=clause['data_source'],
value_type=op['value_type'])
if special_handler is not None:
return special_handler(self, clause)
return self._default_clause(clause)
def where_clause(self, data_source, operator, value):
return sql_prepare.where_clause(data_source['column'], operator, value)
def _default_clause(self, clause):
"""
Given a filter_clause, convert it to a WHERE clause
"""
ds = clause['data_source_resolved']
op = clause['operator_resolved']
value = clause['value_vivified']
return self.where_clause(ds, op, value)
@classmethod
def special_case(cls, source_name, *value_types):
"""
Register a special case handler. A special case handler is a function s:
s(Winnow(), clause) -> WHERE clause string
"""
if cls._special_cases is getattr(super(cls, cls), '_special_cases', None):
raise RuntimeError('Please define your own _special_cases dict, so as to avoid modifying your parent. '
'Note to self: come up with a more durable way to handle this.')
# ideas:
# proxy the _special_cases as the union of own and parent's version.
def decorator(func):
"""
Register a function in the handler table.
"""
for value_type in value_types:
if (source_name, value_type) in cls._special_cases:
raise WinnowError("Conflicting handlers registered for ({},{}): {} and {}".format(
value_type, source_name,
cls._special_cases[(source_name, value_type)].__name__, func.__name__))
cls._special_cases[(source_name, value_type)] = func
return func
return decorator
def special_case_handler(self, source_name, value_type):
"""
Check if a given value_type, source_name pair has
a special case handler.
:return: A function handler for that case accepting
the winnow instance and the clause.
"""
return self._special_cases.get((source_name, value_type))
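# Minimal usage sketch (table, column and operator names are illustrative;
# the operator name must match an entry in default_operators.OPERATORS and
# the rendered SQL depends on the WinnowSql templates):
#
#   sources = [{'display_name': 'Amount',
#               'column': 'amount',
#               'value_types': ['numeric']}]
#   w = Winnow('invoices', sources)
#   filt = {'logical_op': '&',
#           'filter_clauses': [{'data_source': 'Amount',
#                               'operator': 'greater than',
#                               'value': '100'}]}
#   sql_fragment = w.query(filt)   # SELECT * FROM invoices WHERE (...)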
| 2.359375 | 2 |
ibis/bigquery/client.py | tswast/ibis | 0 | 11173 | import regex as re
import time
import collections
import datetime
import six
import pandas as pd
import google.cloud.bigquery as bq
from multipledispatch import Dispatcher
import ibis
import ibis.common as com
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.datatypes as dt
import ibis.expr.lineage as lin
from ibis.compat import parse_version
from ibis.client import Database, Query, SQLClient
from ibis.bigquery import compiler as comp
from google.api.core.exceptions import BadRequest
NATIVE_PARTITION_COL = '_PARTITIONTIME'
def _ensure_split(table_id, dataset_id):
split = table_id.split('.')
if len(split) > 1:
assert len(split) == 2
if dataset_id:
raise ValueError(
"Can't pass a fully qualified table name *AND* a dataset_id"
)
(dataset_id, table_id) = split
return (table_id, dataset_id)
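# How _ensure_split resolves table references (values are illustrative):
#   _ensure_split('my_table', 'my_dataset')    -> ('my_table', 'my_dataset')
#   _ensure_split('my_dataset.my_table', None) -> ('my_table', 'my_dataset')
#   _ensure_split('my_dataset.my_table', 'other') raises ValueError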
_IBIS_TYPE_TO_DTYPE = {
'string': 'STRING',
'int64': 'INT64',
'double': 'FLOAT64',
'boolean': 'BOOL',
'timestamp': 'TIMESTAMP',
'date': 'DATE',
}
_DTYPE_TO_IBIS_TYPE = {
'INT64': dt.int64,
'FLOAT64': dt.double,
'BOOL': dt.boolean,
'STRING': dt.string,
'DATE': dt.date,
# FIXME: enforce no tz info
'DATETIME': dt.timestamp,
'TIME': dt.time,
'TIMESTAMP': dt.timestamp,
'BYTES': dt.binary,
}
_LEGACY_TO_STANDARD = {
'INTEGER': 'INT64',
'FLOAT': 'FLOAT64',
'BOOLEAN': 'BOOL',
}
@dt.dtype.register(bq.schema.SchemaField)
def bigquery_field_to_ibis_dtype(field):
typ = field.field_type
if typ == 'RECORD':
fields = field.fields
assert fields
names = [el.name for el in fields]
ibis_types = list(map(dt.dtype, fields))
ibis_type = dt.Struct(names, ibis_types)
else:
ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)
ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)
if field.mode == 'REPEATED':
ibis_type = dt.Array(ibis_type)
return ibis_type
@sch.infer.register(bq.table.Table)
def bigquery_schema(table):
pairs = [(el.name, dt.dtype(el)) for el in table.schema]
try:
if table.list_partitions():
pairs.append((NATIVE_PARTITION_COL, dt.timestamp))
except BadRequest:
pass
return sch.schema(pairs)
class BigQueryCursor(object):
"""Cursor to allow the BigQuery client to reuse machinery in ibis/client.py
"""
def __init__(self, query):
self.query = query
def fetchall(self):
return list(self.query.fetch_data())
@property
def columns(self):
return [field.name for field in self.query.schema]
def __enter__(self):
# For compatibility when constructed from Query.execute()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def _find_scalar_parameter(expr):
""":func:`~ibis.expr.lineage.traverse` function to find all
:class:`~ibis.expr.types.ScalarParameter` instances and yield the operation
    and the parent expression's resolved name.
Parameters
----------
expr : ibis.expr.types.Expr
Returns
-------
Tuple[bool, object]
"""
op = expr.op()
if isinstance(op, ops.ScalarParameter):
result = op, expr.get_name()
else:
result = None
return lin.proceed, result
class BigQueryQuery(Query):
def __init__(self, client, ddl, query_parameters=None):
super(BigQueryQuery, self).__init__(client, ddl)
# self.expr comes from the parent class
query_parameter_names = dict(
lin.traverse(_find_scalar_parameter, self.expr))
self.query_parameters = [
bigquery_param(
param.to_expr().name(query_parameter_names[param]), value
) for param, value in (query_parameters or {}).items()
]
def _fetch(self, cursor):
df = pd.DataFrame(cursor.fetchall(), columns=cursor.columns)
return self.schema().apply_to(df)
def execute(self):
# synchronous by default
with self.client._execute(
self.compiled_sql,
results=True,
query_parameters=self.query_parameters
) as cur:
result = self._fetch(cur)
return self._wrap_result(result)
class BigQueryAPIProxy(object):
def __init__(self, project_id):
self._client = bq.Client(project_id)
@property
def client(self):
return self._client
@property
def project_id(self):
return self.client.project
def get_datasets(self):
return list(self.client.list_datasets())
def get_dataset(self, dataset_id):
return self.client.dataset(dataset_id)
def get_table(self, table_id, dataset_id, reload=True):
(table_id, dataset_id) = _ensure_split(table_id, dataset_id)
table = self.client.dataset(dataset_id).table(table_id)
if reload:
table.reload()
return table
def get_schema(self, table_id, dataset_id):
return self.get_table(table_id, dataset_id).schema
def run_sync_query(self, stmt):
query = self.client.run_sync_query(stmt)
query.use_legacy_sql = False
query.run()
# run_sync_query is not really synchronous: there's a timeout
while not query.job.done():
query.job.reload()
time.sleep(0.1)
return query
class BigQueryDatabase(Database):
pass
bigquery_param = Dispatcher('bigquery_param')
@bigquery_param.register(ir.StructScalar, collections.OrderedDict)
def bq_param_struct(param, value):
field_params = [bigquery_param(param[k], v) for k, v in value.items()]
return bq.StructQueryParameter(param.get_name(), *field_params)
@bigquery_param.register(ir.ArrayValue, list)
def bq_param_array(param, value):
param_type = param.type()
assert isinstance(param_type, dt.Array), str(param_type)
try:
bigquery_type = _IBIS_TYPE_TO_DTYPE[str(param_type.value_type)]
except KeyError:
raise com.UnsupportedBackendType(param_type)
else:
return bq.ArrayQueryParameter(param.get_name(), bigquery_type, value)
@bigquery_param.register(
ir.TimestampScalar,
six.string_types + (datetime.datetime, datetime.date)
)
def bq_param_timestamp(param, value):
assert isinstance(param.type(), dt.Timestamp)
# TODO(phillipc): Not sure if this is the correct way to do this.
timestamp_value = pd.Timestamp(value, tz='UTC').to_pydatetime()
return bq.ScalarQueryParameter(
param.get_name(), 'TIMESTAMP', timestamp_value)
@bigquery_param.register(ir.StringScalar, six.string_types)
def bq_param_string(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'STRING', value)
@bigquery_param.register(ir.IntegerScalar, six.integer_types)
def bq_param_integer(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'INT64', value)
@bigquery_param.register(ir.FloatingScalar, float)
def bq_param_double(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'FLOAT64', value)
@bigquery_param.register(ir.BooleanScalar, bool)
def bq_param_boolean(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'BOOL', value)
@bigquery_param.register(ir.DateScalar, six.string_types)
def bq_param_date_string(param, value):
return bigquery_param(param, pd.Timestamp(value).to_pydatetime().date())
@bigquery_param.register(ir.DateScalar, datetime.datetime)
def bq_param_date_datetime(param, value):
return bigquery_param(param, value.date())
@bigquery_param.register(ir.DateScalar, datetime.date)
def bq_param_date(param, value):
return bq.ScalarQueryParameter(param.get_name(), 'DATE', value)
class BigQueryClient(SQLClient):
sync_query = BigQueryQuery
database_class = BigQueryDatabase
proxy_class = BigQueryAPIProxy
dialect = comp.BigQueryDialect
def __init__(self, project_id, dataset_id):
self._proxy = type(self).proxy_class(project_id)
self._dataset_id = dataset_id
@property
def project_id(self):
return self._proxy.project_id
@property
def dataset_id(self):
return self._dataset_id
@property
def _table_expr_klass(self):
return ir.TableExpr
def table(self, *args, **kwargs):
t = super(BigQueryClient, self).table(*args, **kwargs)
if NATIVE_PARTITION_COL in t.columns:
col = ibis.options.bigquery.partition_col
assert col not in t
return (t
.mutate(**{col: t[NATIVE_PARTITION_COL]})
.drop([NATIVE_PARTITION_COL]))
return t
def _build_ast(self, expr, context):
result = comp.build_ast(expr, context)
return result
    def _execute_query(self, dml, is_async=False):
        klass = self.async_query if is_async else self.sync_query
inst = klass(self, dml, query_parameters=dml.context.params)
df = inst.execute()
return df
def _fully_qualified_name(self, name, database):
dataset_id = database or self.dataset_id
return dataset_id + '.' + name
def _get_table_schema(self, qualified_name):
return self.get_schema(qualified_name)
def _execute(self, stmt, results=True, query_parameters=None):
# TODO(phillipc): Allow **kwargs in calls to execute
query = self._proxy.client.run_sync_query(stmt)
query.use_legacy_sql = False
query.query_parameters = query_parameters or []
query.run()
# run_sync_query is not really synchronous: there's a timeout
while not query.job.done():
query.job.reload()
time.sleep(0.1)
return BigQueryCursor(query)
def database(self, name=None):
if name is None:
name = self.dataset_id
return self.database_class(name, self)
@property
def current_database(self):
return self.database(self.dataset_id)
def set_database(self, name):
self._dataset_id = name
def exists_database(self, name):
return self._proxy.get_dataset(name).exists()
def list_databases(self, like=None):
results = [dataset.name
for dataset in self._proxy.get_datasets()]
if like:
results = [
dataset_name for dataset_name in results
if re.match(like, dataset_name)
]
return results
def exists_table(self, name, database=None):
(table_id, dataset_id) = _ensure_split(name, database)
return self._proxy.get_table(table_id, dataset_id).exists()
def list_tables(self, like=None, database=None):
dataset = self._proxy.get_dataset(database or self.dataset_id)
result = [table.name for table in dataset.list_tables()]
if like:
result = [
table_name for table_name in result
if re.match(like, table_name)
]
return result
def get_schema(self, name, database=None):
(table_id, dataset_id) = _ensure_split(name, database)
bq_table = self._proxy.get_table(table_id, dataset_id)
return sch.infer(bq_table)
@property
def version(self):
return parse_version(bq.__version__)
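# Minimal usage sketch (assumes Google Cloud credentials are configured;
# the project, dataset and table names below are placeholders):
#
#   client = BigQueryClient('my-project', 'my_dataset')
#   print(client.list_tables())
#   t = client.table('my_table')
#   df = t.limit(10).execute()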
_DTYPE_TO_IBIS_TYPE = {
'INT64': dt.int64,
'FLOAT64': dt.double,
'BOOL': dt.boolean,
'STRING': dt.string,
'DATE': dt.date,
# FIXME: enforce no tz info
'DATETIME': dt.timestamp,
'TIME': dt.time,
'TIMESTAMP': dt.timestamp,
'BYTES': dt.binary,
}
_LEGACY_TO_STANDARD = {
'INTEGER': 'INT64',
'FLOAT': 'FLOAT64',
'BOOLEAN': 'BOOL',
}
def _discover_type(field):
typ = field.field_type
if typ == 'RECORD':
fields = field.fields
assert fields
names = [el.name for el in fields]
ibis_types = [_discover_type(el) for el in fields]
ibis_type = dt.Struct(names, ibis_types)
else:
ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)
ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)
if field.mode == 'REPEATED':
ibis_type = dt.Array(ibis_type)
return ibis_type
def bigquery_table_to_ibis_schema(table):
pairs = [(el.name, _discover_type(el)) for el in table.schema]
try:
if table.list_partitions():
pairs.append((NATIVE_PARTITION_COL, dt.timestamp))
except BadRequest:
pass
return ibis.schema(pairs)
| 1.992188 | 2 |
5 kyu/Family Tree Ancestors.py | mwk0408/codewars_solutions | 6 | 11174 | <filename>5 kyu/Family Tree Ancestors.py<gh_stars>1-10
from math import log, ceil
def chart(person):
    # Build a 31-row text chart of `person`'s ancestors, with the person on
    # the middle row and older generations pushed toward the left margin.
res=helper(tuple(sorted(person.parents(), key=lambda x: x.sex, reverse=True)), 2, [], 16, 16)
res.append((person.name, 16))
dict={j:i for i,j in res}
dict2=helper2(16)
chart=[]
for i in range(1, 32):
temp=((4-depth(i))*11-1)*" "+("|" if 4-depth(i)!=0 else "")
name=dict.get(i, "_______")
number=str(dict2[i]).rjust(2, "0")
temp+=f"{number} {name}"
chart.append(list(temp))
for index,i in enumerate(chart):
digits=int("".join(j for j in "".join(i) if j.isdigit()))
num=5-ceil(log(digits+1, 2))
if digits==1 or num==0:
continue
for k in range(1, 2**num):
chart[index+(-k if digits%2 else k)][44-num*11-1]="|"
chart[16][32]=" "
chart[14][32]=" "
return "\n".join("".join(i) for i in chart)+"\n"
def helper(person, index, arr, row, rate):
    # Recursively walk the parents, assigning each ancestor a display row
    # (fathers above, mothers below, halving the offset each generation).
if person==None or index>31:
return
rate//=2
for i,j in enumerate(person):
if (index+i)<=31:
arr.append((j.name, row-rate if j.sex=="M" else row+rate))
helper(tuple(sorted(j.parents(), key=lambda x: x.sex, reverse=True)), (index+i)*2, arr, row-rate if j.sex=="M" else row+rate, rate)
return arr
def depth(num):
    # Count how many times num is divisible by 2; used to pick the
    # indentation column for each chart row.
total=0
while num%2==0:
num//=2
total+=1
return total
def helper2(num):
    # Map chart row numbers (1-31) to the printed ancestor indices
    # (1 = the person, 2-3 = parents, 4-7 = grandparents, ...).
start=0
dict={}
while num>0:
increment=2**(start+1)
for i in range(num):
dict[2**start+i*increment]=num+i
start+=1
num//=2
return dict | 3.03125 | 3 |
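# Quick sanity examples for the helpers above:
#   depth(8)        -> 3   (8 = 2**3)
#   depth(12)       -> 2
#   helper2(16)[16] -> 1   (the middle chart row holds the root person)
#   helper2(16)[8]  -> 2   (father's row); helper2(16)[24] -> 3 (mother's row)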
tests/make_expected_lookup.py | bfis/coffea | 77 | 11175 | import numpy as np
import ROOT
from dummy_distributions import dummy_pt_eta
counts, test_in1, test_in2 = dummy_pt_eta()
f = ROOT.TFile.Open("samples/testSF2d.root")
sf = f.Get("scalefactors_Tight_Electron")
xmin, xmax = sf.GetXaxis().GetXmin(), sf.GetXaxis().GetXmax()
ymin, ymax = sf.GetYaxis().GetXmin(), sf.GetYaxis().GetXmax()
test_out = np.empty_like(test_in1)
for i, (eta, pt) in enumerate(zip(test_in1, test_in2)):
if xmax <= eta:
eta = xmax - 1.0e-5
elif eta < xmin:
eta = xmin
if ymax <= pt:
pt = ymax - 1.0e-5
elif pt < ymin:
pt = ymin
ib = sf.FindBin(eta, pt)
test_out[i] = sf.GetBinContent(ib)
print(repr(test_out))
| 1.851563 | 2 |
engine/sentiment_analysis.py | zgeorg03/nesase | 2 | 11176 | <filename>engine/sentiment_analysis.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 17:42:27 2018
@author: zgeorg03
"""
import re
import json # Used for converting json to dictionary
import datetime # Used for date conversions
import matplotlib.pyplot as plt
import numpy as np
from sentiment import Sentiment
import json
class NewsArticle:
def __init__(self,hash,title,author,url,content,date,topics, feed):
self.hash = hash
self.title = title
self.author = author
self.url = url
self.content = content
self.date = datetime.datetime.fromtimestamp(date/1000.0)
self.topics = topics
self.feed = feed
self.sep = re.compile("[.!?]")
def __repr__(self):
return "hash={},title={},author={},date={},topics={}".format(
self.hash, self.title, self.author,
self.date, self.topics, self.feed)
def __str__(self):
return self.__repr__()
def produce_title_scores(self, sentiment):
lines = self.sep.split(self.title)
sentiment.score(lines)
neg,neu,pos,com,count = sentiment.get_avg_scores()
return (float("{0:.2f}".format(neg*100)), float("{0:.2f}".format(neu*100))
, float("{0:.2f}".format(pos*100)), float("{0:.2f}".format(com*100)),count
)
def produce_content_scores(self, sentiment):
lines = self.sep.split(self.content)
sentiment.score(lines)
neg,neu,pos,com,count = sentiment.get_avg_scores()
return (float("{0:.2f}".format(neg*100)), float("{0:.2f}".format(neu*100))
, float("{0:.2f}".format(pos*100)), float("{0:.2f}".format(com*100)),count
)
class Parser:
def __init__(self,file_in,max_articles=None,file_out=None):
        self.file_name = file_in
self.max_articles = max_articles
self.articles = []
self.sentiment = Sentiment()
self.results = []
self.file_out = file_out
def parse(self):
count = 0
with open(self.file_name,"r",encoding="UTF-8") as file:
for line in file:
if line.startswith(','):
continue
self.articles.append(self.parse_news_article(line))
count += 1
if self.max_articles:
if count >= self.max_articles:
break
def write(self):
for i,article in enumerate(self.articles):
if i % 100 == 0:
print('Finished: {} docs'.format(i))
self.write_article(article)
if self.file_out:
with open(self.file_out, 'w') as outfile:
json.dump(self.results, outfile,sort_keys=True,indent=4)
else:
print(json.dumps(self.results,sort_keys=True,indent=4))
def write_article(self,article):
res = {}
res['neg_title'],res['neu_title'],res['pos_title'],res['score_title'], _ = article.produce_title_scores(self.sentiment)
res['neg_content'],res['neu_content'],res['pos_content'],res['score_content'], _ = article.produce_content_scores(self.sentiment)
res['id'] = article.hash
res['title'] = article.title
res['date'] = int(article.date.timestamp())
res['content'] = article.content
res['topics'] = article.topics
res['feed'] = article.feed
res['url'] = article.url
res['author'] = article.author
res['overall_score']= float(res['score_title'])*0.75 + float(res['score_content'])*0.25
overall_score = res['overall_score']
if overall_score <= -50:
res['class']= 'Very Negative'
res['class_code'] = 4
elif overall_score <= 0:
res['class']= 'Negative'
res['class_code'] = 3
elif overall_score <= 50:
res['class']= 'Positive'
res['class_code'] = 2
elif overall_score <= 100:
res['class']= 'Very Positive'
res['class_code'] = 1
self.results.append(res)
def parse_news_article(self, line):
data = json.loads(line)
hash = data['hash']
title = data['title']
author = data['author']
content = data['content']
date = data['date']
topics = list(set(data['topics']))
feed = data['feed']
url = data['link']
return NewsArticle(hash,title,author,url,content,date,topics,feed)
if __name__ == '__main__':
file_name = "./log"
#max_articles = 1000
p = Parser(file_name,file_out='data-26-04.json')
p.parse()
p.write()
print('Finished')
def test():
    # Exploratory plotting helper (not called above); assumes `articles`,
    # `sentiment` and `max_articles` exist in the surrounding scope.
    # First figure: per-article title scores.
    x = np.array([articles[i].produce_title_scores(sentiment)
                  for i in range(0, max_articles)])
    plt.figure(figsize=(12, 9))
    plt.title('Articles: {}'.format(max_articles))
    plt.plot(x[:, 0], 'x', label="Negative {0:.2f}".format(np.average(x[:, 0])))
    plt.plot(x[:, 2], '+', label="Positive {0:.2f}".format(np.average(x[:, 2])))
    plt.plot(x[:, 1], '.', label="Neutral {0:.2f}".format(np.average(x[:, 1])))
    plt.plot(x[:, 3], '.', label="Compound {0:.2f}".format(np.average(x[:, 3])))
    plt.legend()
    # Second figure: per-article content scores.
    x = []
    for i in range(0, max_articles):
        x.append(articles[i].produce_content_scores(sentiment))
    x = np.array(x)
    print(x[:, 0])
    plt.figure(figsize=(12, 9))
    plt.title('Articles: {}'.format(max_articles))
    plt.plot(x[:, 0], 'x', label="Negative {0:.2f}".format(np.average(x[:, 0])))
    plt.plot(x[:, 2], '+', label="Positive {0:.2f}".format(np.average(x[:, 2])))
    plt.plot(x[:, 1], '.', label="Neutral {0:.2f}".format(np.average(x[:, 1])))
    plt.plot(x[:, 3], '.', label="Compound {0:.2f}".format(np.average(x[:, 3])))
    plt.legend()
| 2.75 | 3 |
ch05/recursion.py | laszlokiraly/LearningAlgorithms | 74 | 11177 | """Recursive implementations."""
def find_max(A):
"""invoke recursive function to find maximum value in A."""
def rmax(lo, hi):
"""Use recursion to find maximum value in A[lo:hi+1]."""
if lo == hi: return A[lo]
mid = (lo+hi) // 2
L = rmax(lo, mid)
R = rmax(mid+1, hi)
return max(L, R)
return rmax(0, len(A)-1)
def find_max_with_count(A):
"""Count number of comparisons."""
def frmax(lo, hi):
"""Use recursion to find maximum value in A[lo:hi+1] incl. count"""
if lo == hi: return (0, A[lo])
mid = (lo+hi)//2
ctleft,left = frmax(lo, mid)
ctright,right = frmax(mid+1, hi)
return (1+ctleft+ctright, max(left, right))
return frmax(0, len(A)-1)
def count(A,target):
"""invoke recursive function to return number of times target appears in A."""
def rcount(lo, hi, target):
"""Use recursion to find maximum value in A[lo:hi+1]."""
if lo == hi:
return 1 if A[lo] == target else 0
mid = (lo+hi)//2
left = rcount(lo, mid, target)
right = rcount(mid+1, hi, target)
return left + right
return rcount(0, len(A)-1, target)
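# A small usage sketch of the helpers above:
if __name__ == '__main__':
    sample = [3, 1, 4, 1, 5, 9, 2, 6]
    print(find_max(sample))              # 9
    print(find_max_with_count(sample))   # (7, 9): seven comparisons over eight values
    print(count(sample, 1))              # 2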
| 4.03125 | 4 |
setup.py | koonimaru/DeepGMAP | 11 | 11178 | #from distutils.core import setup
from setuptools import setup, find_packages
from distutils.extension import Extension
import re
import os
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M,
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
try:
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = { }
ext_modules = [ ]
if use_cython:
ext_modules += [
Extension("deepgmap.data_preprocessing_tools.seq_to_binary2", [ "deepgmap/data_preprocessing_tools/seq_to_binary2.pyx" ]),
#Extension("data_preprocessing_tools.queue", [ "deepgmap/data_preprocessing_tools/queue.pyx" ],libraries=["calg"]),
Extension("deepgmap.post_train_tools.cython_util", [ "deepgmap/post_train_tools/cython_util.pyx" ]),
]
cmdclass.update({ 'build_ext': build_ext })
else:
ext_modules += [
Extension("deepgmap.data_preprocessing_tools.seq_to_binary2", [ "deepgmap/data_preprocessing_tools/seq_to_binary2.c" ]),
Extension("deepgmap.post_train_tools.cython_util", [ "deepgmap/post_train_tools/cython_util.c" ]),
]
#print(find_version("deepgmap", "__init__.py"))
setup(
name='DeepGMAP',
#version=VERSION,
version=find_version("deepgmap", "__init__.py"),
description='Learning and predicting gene regulatory sequences in genomes',
author='<NAME>',
author_email='<EMAIL>',
url='',
packages=['deepgmap','deepgmap.train','deepgmap.network_constructors','deepgmap.post_train_tools','deepgmap.data_preprocessing_tools','deepgmap.misc'],
#packages=find_packages('deepgmap'),
#packages=['deepgmap.'],
package_dir={'DeepGMAP':'deepgmap'},
#package_data = {
# '': ['enhancer_prediction/*', '*.pyx', '*.pxd', '*.c', '*.h'],
#},
scripts=['bin/deepgmap',
],
#packages=find_packages(),
cmdclass = cmdclass,
ext_modules=ext_modules,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: Apache Software License ',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
install_requires=['tensorflow>=1.15', 'numpy', 'matplotlib', 'sklearn', 'tornado', 'natsort', 'psutil', 'pyBigWig'],
long_description=open('README.rst').read(),
)
| 1.96875 | 2 |
{{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 0 | 11179 |
import logging
from flask import Flask
from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from flask_talisman import Talisman
from flask_ipban import IpBan
from config import Config, get_logger_handler
# database
class SQLAlchemy(_BaseSQLAlchemy):
def apply_pool_defaults(self, app, options):
super(SQLAlchemy, self).apply_pool_defaults(app, options)
options["pool_pre_ping"] = True
db = SQLAlchemy()
migrate = Migrate()
cors = CORS()
talisman = Talisman()
global_config = Config()
ip_ban = IpBan(ban_seconds=200, ban_count=global_config.IP_BAN_LIST_COUNT)
# logging
logger = logging.getLogger('frontend')
def create_app(config_class=None):
app = Flask(__name__)
if config_class is None:
config_class = Config()
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
# TODO - Refine and update when build pipeline is stable. Get from global_config
cors.init_app(app, origins=["http://localhost:5000", "http://localhost:3000", '*'])
if app.config["ENV"] in ("staging", "production"):
# Secure the application and implement best practice https redirects and a content security policy
talisman.init_app(app, content_security_policy=None)
# ip_ban.init_app(app)
# ip_ban.load_nuisances(global_config.IP_BAN_REGEX_FILE)
from api.routes import bp as api_bp
app.register_blueprint(api_bp)
if not app.debug and not app.testing:
app.logger.addHandler(get_logger_handler())
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
return app
from api import models
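# Minimal sketch of serving the factory locally (the port is a placeholder;
# production deployments would run this through a WSGI server instead):
#
#   from api import create_app
#   from config import Config
#   app = create_app(Config())
#   app.run(port=5000)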
| 2.125 | 2 |
weibospider/pipelines.py | czyczyyzc/WeiboSpider | 2 | 11180 | <filename>weibospider/pipelines.py
# -*- coding: utf-8 -*-
import os
import csv
import pymongo
from pymongo.errors import DuplicateKeyError
from settings import MONGO_HOST, MONGO_PORT, SAVE_ROOT
class MongoDBPipeline(object):
def __init__(self):
client = pymongo.MongoClient(MONGO_HOST, MONGO_PORT)
db = client['weibo']
self.Users = db["Users"]
self.Tweets = db["Tweets"]
self.Comments = db["Comments"]
self.Relationships = db["Relationships"]
self.Reposts = db["Reposts"]
def process_item(self, item, spider):
if spider.name == 'comment_spider':
self.insert_item(self.Comments, item)
elif spider.name == 'fan_spider':
self.insert_item(self.Relationships, item)
elif spider.name == 'follower_spider':
self.insert_item(self.Relationships, item)
elif spider.name == 'user_spider':
self.insert_item(self.Users, item)
elif spider.name == 'tweet_spider':
self.insert_item(self.Tweets, item)
elif spider.name == 'repost_spider':
self.insert_item(self.Reposts, item)
return item
@staticmethod
def insert_item(collection, item):
try:
collection.insert(dict(item))
except DuplicateKeyError:
pass
class CSVPipeline(object):
def __init__(self):
if not os.path.exists(SAVE_ROOT):
os.makedirs(SAVE_ROOT)
users_file = open(os.path.join(SAVE_ROOT, 'users.csv'), 'w', encoding='utf-8-sig', newline='')
tweets_file = open(os.path.join(SAVE_ROOT, 'tweets.csv'), 'w', encoding='utf-8-sig', newline='')
comments_file = open(os.path.join(SAVE_ROOT, 'comments.csv'), 'w', encoding='utf-8-sig', newline='')
relationships_file = open(os.path.join(SAVE_ROOT, 'relationships.csv'), 'w', encoding='utf-8-sig', newline='')
reposts_file = open(os.path.join(SAVE_ROOT, 'reposts.csv'), 'w', encoding='utf-8-sig', newline='')
self.users_writer = csv.writer(users_file, dialect='excel')
self.tweets_writer = csv.writer(tweets_file, dialect='excel')
self.comments_writer = csv.writer(comments_file, dialect='excel')
self.relationships_writer = csv.writer(relationships_file, dialect='excel')
self.reposts_writer = csv.writer(reposts_file, dialect='excel')
self.users_head = False
self.tweets_head = False
self.comments_head = False
self.relationships_head = False
self.reposts_head = False
self.users_ids = []
self.tweets_ids = []
self.comments_ids = []
self.relationships_ids = []
self.reposts_ids = []
def process_item(self, item, spider):
item = dict(item)
if spider.name == 'comment_spider':
if not self.comments_head:
self.comments_writer.writerow(list(item.keys()))
self.comments_head = True
# if item['_id'] not in self.comments_ids:
self.comments_writer.writerow(list(item.values()))
self.comments_ids.append(item['_id'])
elif spider.name == 'fan_spider':
if not self.relationships_head:
self.relationships_writer.writerow(list(item.keys()))
self.relationships_head = True
# if item['_id'] not in self.relationships_ids:
self.relationships_writer.writerow(list(item.values()))
self.relationships_ids.append(item['_id'])
elif spider.name == 'follower_spider':
if not self.relationships_head:
self.relationships_writer.writerow(list(item.keys()))
self.relationships_head = True
# if item['_id'] not in self.relationships_ids:
self.relationships_writer.writerow(list(item.values()))
self.relationships_ids.append(item['_id'])
elif spider.name == 'user_spider':
if not self.users_head:
self.users_writer.writerow(list(item.keys()))
self.users_head = True
# if item['_id'] not in self.users_ids:
self.users_writer.writerow(list(item.values()))
self.users_ids.append(item['_id'])
elif spider.name == 'tweet_spider':
if not self.tweets_head:
self.tweets_writer.writerow(list(item.keys()))
self.tweets_head = True
# if item['_id'] not in self.tweets_ids:
self.tweets_writer.writerow(list(item.values()))
self.tweets_ids.append(item['_id'])
elif spider.name == 'repost_spider':
if not self.reposts_head:
self.reposts_writer.writerow(list(item.keys()))
self.reposts_head = True
# if item['_id'] not in self.reposts_ids:
self.reposts_writer.writerow(list(item.values()))
self.reposts_ids.append(item['_id'])
return item
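# Sketch of enabling these pipelines in the Scrapy project's settings.py
# (the module path and priority numbers are illustrative and may differ):
#
#   ITEM_PIPELINES = {
#       'pipelines.MongoDBPipeline': 300,
#       'pipelines.CSVPipeline': 400,
#   }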
| 2.6875 | 3 |
tests/test_gc3_config.py | ericmharris/gc3-query | 0 | 11181 | from pathlib import Path
from requests.auth import _basic_auth_str
import pytest
from bravado_core.formatter import SwaggerFormat, NO_OP
from gc3_query.lib.gc3_config import GC3Config, IDMCredential
TEST_BASE_DIR: Path = Path(__file__).parent.joinpath("GC3Config")
config_dir = TEST_BASE_DIR.joinpath("config")
def test_setup():
assert TEST_BASE_DIR.exists()
assert config_dir.exists()
def test_init():
gc3_config = GC3Config()
assert 'gc30003' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '<EMAIL>'
def test_set_credential():
gc3_config = GC3Config()
assert 'gc3test' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '<EMAIL>'
credential = gc3_config.set_credential(idm_domain_name='gc3test', password='<PASSWORD>' )
assert credential
assert credential.password == '<PASSWORD>'
assert credential.idm_domain_name == 'gc3test'
def test_set_gc3pilot_credential():
gc3_config = GC3Config()
assert 'gc3pilot' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '<EMAIL>'
credential = gc3_config.set_credential(idm_domain_name='gc3pilot', password='<PASSWORD>!' )
assert credential
assert credential.password == '<PASSWORD>!'
assert credential.idm_domain_name == 'gc3pilot'
@pytest.fixture()
def get_credential_setup() -> IDMCredential:
gc3_config = GC3Config()
assert 'gc3test' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '<EMAIL>'
credential = gc3_config.set_credential(idm_domain_name='gc3test', password='<PASSWORD>' )
yield (credential)
def test_load_atoml_files_individually(get_credential_setup):
credential = get_credential_setup
gc3_config = GC3Config()
assert 'gc3test' in gc3_config['idm']['domains']
assert gc3_config.user.cloud_username == '<EMAIL>'
check_credential = gc3_config.get_credential(idm_domain_name='gc3test')
assert check_credential==credential
def test_credential_basic_auth(get_credential_setup):
credential = get_credential_setup
credential_expected_basic_auth =_basic_auth_str('<EMAIL>', '<PASSWORD>')
gc3_config = GC3Config()
check_credential = gc3_config.get_credential(idm_domain_name='gc30003')
assert gc3_config.user.cloud_username == '<EMAIL>'
assert check_credential.idm_domain_name=='gc30003'
assert check_credential.basic_auth_str.startswith('Basic')
assert check_credential.basic_auth_str != credential.basic_auth_str
def test_get_main_credential():
gc3_config = GC3Config()
check_credential = gc3_config.get_credential(idm_domain_name='gc30003')
assert gc3_config.user.cloud_username == '<EMAIL>'
assert check_credential.idm_domain_name=='gc30003'
# @pytest.fixture()
# def get_bravado_config_setup():
# gc3_config = GC3Config()
# assert 'iaas_classic' in gc3_config
# yield (gc3_config)
#
# def test_bravado_client_config(get_bravado_config_setup):
# gc3_config = get_bravado_config_setup
# assert 'iaas_classic' in gc3_config
# bravado_client_config = gc3_config.bravado_client_config
# assert bravado_client_config
# assert 'formats' not in bravado_client_config
# assert not 'include_missing_properties' in bravado_client_config
# assert 'also_return_response' in bravado_client_config
# bravado_client_config_2 = gc3_config.bravado_client_config
# assert bravado_client_config==bravado_client_config_2
# assert bravado_client_config is not bravado_client_config_2
# assert isinstance(bravado_client_config, dict)
#
# def test_bravado_core_config(get_bravado_config_setup):
# gc3_config = get_bravado_config_setup
# assert 'iaas_classic' in gc3_config
# bravado_core_config = gc3_config.bravado_core_config
# assert bravado_core_config
# assert 'formats' in bravado_core_config
# assert 'include_missing_properties' in bravado_core_config
# assert not 'also_return_response' in bravado_core_config
# bravado_core_config_2 = gc3_config.bravado_core_config
# assert bravado_core_config==bravado_core_config_2
# assert bravado_core_config is not bravado_core_config_2
# assert isinstance(bravado_core_config, dict)
# assert isinstance(bravado_core_config['formats'], list)
#
#
#
# def test_bravado_config(get_bravado_config_setup):
# gc3_config = get_bravado_config_setup
# assert 'iaas_classic' in gc3_config
# bravado_config = gc3_config.bravado_config
# assert bravado_config
# assert 'formats' in bravado_config
# assert 'include_missing_properties' in bravado_config
# assert 'also_return_response' in bravado_config
# bravado_config_2 = gc3_config.bravado_config
# assert bravado_config==bravado_config_2
# assert bravado_config is not bravado_config_2
# assert isinstance(bravado_config, dict)
# assert isinstance(bravado_config['formats'], list)
#
@pytest.fixture()
def get_constants_setup():
gc3_config = GC3Config()
assert 'iaas_classic' in gc3_config
yield (gc3_config)
def test_open_api_catalog_dir(get_constants_setup):
gc3_config = get_constants_setup
open_api_catalog_dir = gc3_config.OPEN_API_CATALOG_DIR
assert open_api_catalog_dir
# def test_BRAVADO_CONFIG(get_constants_setup):
# gc3_config = get_constants_setup
# bravado_config = gc3_config.BRAVADO_CONFIG
# assert bravado_config
# assert 'formats' in bravado_config
# assert 'include_missing_properties' in bravado_config
# assert 'also_return_response' in bravado_config
# assert isinstance(bravado_config, dict)
# assert isinstance(bravado_config['formats'], list)
# assert bravado_config['formats']
# formats = [f.format for f in bravado_config['formats']]
# assert 'json-bool' in formats
# assert all([isinstance(i , SwaggerFormat) for i in bravado_config['formats']])
| 1.914063 | 2 |
lab6/server/datapredict.py | zhiji95/iot | 2 | 11182 | <reponame>zhiji95/iot<filename>lab6/server/datapredict.py
import machine
from machine import *
import ssd1306
import time
import socket
import urequests as requests
import json
word = {'body':8}
labels = ['c', 'o', 'l', 'u', 'm', 'b', 'i', 'a','null']
HOST = '192.168.127.12'
PORT = 8080
flag = 0
stop = False
data = {}
xdata = []
ydata = []
n = 0
def dp(d):
if (d > 128):
return d - 255
return d
def do_connect():
import network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
print('connecting to network...')
wlan.connect(b'Columbia University')
while not wlan.isconnected():
pass
print('network config:', wlan.ifconfig())
do_connect()
def http_post(url, d):
r = requests.post(url, data=json.dumps(d))
return r.json()
def sendData():
global label
global xdata
global ydata
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
l = {
"label": 'a',
"n": 0,
"number": len(xdata),
"content": {
"data": {
"x": xdata,
"y": ydata
}
}
}
l = json.dumps(l).encode()
s.sendall(l)
data = s.recv(1024)
data = json.loads(data.decode())
xdata, ydata = [], []
return data
def switchAcallback(p):
global flag
time.sleep(0.1)
if p.value() == 1:
flag = 1
def switchCcallback(p):
global stop
if p.value() == 1:
stop = True
switchA = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
switchA.irq(trigger=machine.Pin.IRQ_RISING, handler=switchAcallback)
switchC = machine.Pin(2, machine.Pin.IN, machine.Pin.PULL_UP)
switchC.irq(trigger=machine.Pin.IRQ_RISING, handler=switchCcallback)
spi = machine.SPI(1, baudrate=2000000, polarity=1, phase=1)
cs = machine.Pin(15, machine.Pin.OUT)
cs.value(0)
spi.write(b'\x2d')
spi.write(b'\x2b')
cs.value(1)
cs.value(0)
spi.write(b'\x31')
spi.write(b'\x0f')
cs.value(1)
i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
oled = ssd1306.SSD1306_I2C(128, 32, i2c)
while True:
x = 0
y = 0
sendstatus = "null"
if (flag):
cs.value(0)
test1 = spi.read(5, 0xf2)
cs.value(1)
cs.value(0)
test2 = spi.read(5, 0xf3)
cs.value(1)
cs.value(0)
test3 = spi.read(5, 0xf4)
cs.value(1)
cs.value(0)
test4 = spi.read(5, 0xf5)
cs.value(1)
x = dp(test2[1])
y = dp(test4[1])
xdata.append(x)
ydata.append(y)
sendstatus = "collect" + str(len(xdata)) + ' '+ ' ' + str(x) + ' ' + str(y)
    if stop:
        word = sendData()
        sendstatus = "send success"
        flag = 0
        stop = False
oled.fill(0)
oled.text(labels[word['body']], 0, 0)
oled.text(sendstatus, 0,10)
oled.show()
| 2.796875 | 3 |
Geometry/VeryForwardGeometry/python/dd4hep/geometryRPFromDD_2021_cfi.py | PKUfudawei/cmssw | 2 | 11183 | <gh_stars>1-10
from Geometry.VeryForwardGeometry.dd4hep.v5.geometryRPFromDD_2021_cfi import *
| 1.007813 | 1 |
examples/plots/warmup_schedule.py | shuoyangd/pytorch_warmup | 170 | 11184 | import argparse
import matplotlib.pyplot as plt
import torch
from pytorch_warmup import *
def get_rates(warmup_cls, beta2, max_step):
rates = []
p = torch.nn.Parameter(torch.arange(10, dtype=torch.float32))
optimizer = torch.optim.Adam([{'params': p}], lr=1.0, betas=(0.9, beta2))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
warmup_scheduler = warmup_cls(optimizer)
for step in range(1, max_step+1):
rates.append(optimizer.param_groups[0]['lr'])
optimizer.zero_grad()
optimizer.step()
lr_scheduler.step()
warmup_scheduler.dampen()
return rates
parser = argparse.ArgumentParser(description='Warmup schedule')
parser.add_argument('--output', type=str, default='none',
choices=['none', 'png', 'pdf'],
help='Output file type (default: none)')
args = parser.parse_args()
beta2 = 0.999
max_step = 3000
plt.plot(range(1, max_step+1), get_rates(RAdamWarmup, beta2, max_step), label='RAdam')
plt.plot(range(1, max_step+1), get_rates(UntunedExponentialWarmup, beta2, max_step), label='Untuned Exponential')
plt.plot(range(1, max_step+1), get_rates(UntunedLinearWarmup, beta2, max_step), label='Untuned Linear')
plt.legend()
plt.title('Warmup Schedule')
plt.xlabel('Iteration')
plt.ylabel(r'Warmup factor $(\omega_t)$')
if args.output == 'none':
plt.show()
else:
plt.savefig(f'warmup_schedule.{args.output}')
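# Example invocations:
#   python warmup_schedule.py                 # shows the figure interactively
#   python warmup_schedule.py --output png    # writes warmup_schedule.png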
| 2.53125 | 3 |
plugins/httpev.py | wohali/gizzy | 3 | 11185 | <reponame>wohali/gizzy
"""\
This plugin merely enables other plugins to accept data over HTTP. If
a plugin defines a module level function named "httpev" it will be
invoked for POST requests to the url http://$hostname/event/$pluginname.
The function is invoked from the thread in the web.py request context
and as such has access to the full web.py API.
"""
import base64
import json
import threading
import web
web.config.debug = False
class Event(object):
def POST(self, plugin):
self.check_authorized()
func = self.find_handler(plugin)
try:
func()
except web.webapi.HTTPError:
raise
except:
log.exception("Plugin '%s' broke handling HTTP event" % plugin)
raise web.webapi.internalerror()
def check_authorized(self):
auth = web.ctx.env.get('HTTP_AUTHORIZATION')
if auth is None:
raise web.webapi.unauthorized()
if not auth.startswith("Basic "):
raise web.webapi.unauthorized()
try:
auth = auth.split(None, 1)[1]
raw = base64.decodestring(auth)
if tuple(raw.split(":", 1)) == config.httpev["auth"]:
return
except:
raise web.webapi.badrequest("Invalid Authorization header")
raise web.webapi.unauthorized()
def find_handler(self, name):
for p in plugin_manager.plugins:
if p.name == name:
func = p.data.get("httpev")
if callable(func):
return func
raise web.webapi.notfound()
class Server(threading.Thread):
def __init__(self):
super(Server, self).__init__()
self.setDaemon(True)
self.urls = ("/event/(.+)", "Event")
self.app = web.application(self.urls, {"Event": Event})
self.addr = ('0.0.0.0', config.httpev["port"])
self.srv = web.httpserver.WSGIServer(self.addr, self.app.wsgifunc())
def stop(self):
self.srv.stop()
def run(self):
self.srv.start()
def load():
s = Server()
s.start()
return s
def unload(s):
s.stop()
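# Sketch of delivering an event to a plugin named "deploy" (the plugin name,
# host and port are placeholders; credentials must match config.httpev["auth"]):
#
#   import requests
#   requests.post("http://localhost:8000/event/deploy",
#                 auth=("user", "secret"),
#                 json={"action": "finished"})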
| 2.421875 | 2 |
ex056.py | danilodelucio/Exercicios_Curso_em_Video | 0 | 11186 | somaIdade = 0
maiorIdade = 0
nomeVelho = ''
totmulher20 = 0
for p in range(1, 5):
    print('---- PERSON {} ----'.format(p))
    nome = str(input('Name: ')).strip()
    idade = int(input('Age: '))
    sexo = str(input('Sex [M/F]: '))
somaIdade += idade
if p == 1 and sexo in 'Mm':
maiorIdade = idade
nomeVelho = nome
if sexo in 'Mm' and idade > maiorIdade:
maiorIdade = idade
nomeVelho = nome
if sexo in 'Ff' and idade < 20:
totmulher20 += 1
mediaIdade = int(somaIdade / 4)
print('The average age of the group is {} years.'.format(mediaIdade))
print('The oldest man is {} years old and is named {}.'.format(maiorIdade, nomeVelho))
print('In total there are {} women under 20 years old.'.format(totmulher20)) | 3.6875 | 4 |
python/promort.py | simleo/promort_pipeline | 0 | 11187 | """\
PROMORT example.
"""
import argparse
import os
import random
import sys
import numpy as np
import pyecvl.ecvl as ecvl
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor
import models
def VGG16(in_layer, num_classes):
x = in_layer
x = eddl.ReLu(eddl.Conv(x, 64, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 64, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 128, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 128, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 256, [3, 3]))
x = eddl.ReLu(eddl.Conv(x, 256, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 256, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 512, [3, 3])), [2, 2], [2, 2])
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.ReLu(eddl.Conv(x, 512, [3, 3]))
x = eddl.MaxPool(eddl.ReLu(eddl.Conv(x, 512, [3, 3])), [2, 2], [2, 2])
x = eddl.Reshape(x, [-1])
x = eddl.ReLu(eddl.Dense(x, 256))
x = eddl.Softmax(eddl.Dense(x, num_classes))
return x
def main(args):
num_classes = 2
size = [256, 256] # size of images
in_ = eddl.Input([3, size[0], size[1]])
out = models.VGG16_promort(in_, num_classes)
net = eddl.Model([in_], [out])
eddl.build(
net,
eddl.rmsprop(1e-6),
#eddl.sgd(0.001, 0.9),
["soft_cross_entropy"],
["categorical_accuracy"],
eddl.CS_GPU([1], mem="low_mem") if args.gpu else eddl.CS_CPU()
)
eddl.summary(net)
eddl.setlogfile(net, "promort_VGG16_classification")
training_augs = ecvl.SequentialAugmentationContainer([
ecvl.AugResizeDim(size)
#ecvl.AugMirror(.5),
#ecvl.AugFlip(.5),
#ecvl.AugRotate([-180, 180]),
#ecvl.AugAdditivePoissonNoise([0, 10]),
#ecvl.AugGammaContrast([0.5, 1.5]),
#ecvl.AugGaussianBlur([0, 0.8]),
#ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
])
validation_augs = ecvl.SequentialAugmentationContainer([
ecvl.AugResizeDim(size),
])
dataset_augs = ecvl.DatasetAugmentations(
[training_augs, validation_augs, None]
)
print("Reading dataset")
#d = ecvl.DLDataset(args.in_ds, args.batch_size)
d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
y = Tensor([args.batch_size, len(d.classes_)])
num_samples_train = len(d.GetSplit())
num_batches_train = num_samples_train // args.batch_size
d.SetSplit(ecvl.SplitType.validation)
num_samples_val = len(d.GetSplit())
num_batches_val = num_samples_val // args.batch_size
indices = list(range(args.batch_size))
metric = eddl.getMetric("categorical_accuracy")
print("Starting training")
### Main loop across epochs
for e in range(args.epochs):
print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
flush=True)
if args.out_dir:
current_path = os.path.join(args.out_dir, "Epoch_%d" % e)
for c in d.classes_:
c_dir = os.path.join(current_path, c)
os.makedirs(c_dir, exist_ok=True)
d.SetSplit(ecvl.SplitType.training)
eddl.reset_loss(net)
total_metric = []
s = d.GetSplit()
random.shuffle(s)
d.split_.training_ = s
d.ResetAllBatches()
### Looping across batches of training data
for b in range(num_batches_train):
print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
e + 1, args.epochs, b + 1, num_batches_train
), end="", flush=True)
d.LoadBatch(x, y)
x.div_(255.0)
tx, ty = [x], [y]
#print (tx[0].info())
eddl.train_batch(net, tx, ty, indices)
#eddl.print_loss(net, b)
instances = (b+1) * args.batch_size
print ("loss = %.3f, acc = %.3f" % (net.fiterr[0]/instances, net.fiterr[1]/instances))
#print()
print("Saving weights")
eddl.save(net, "promort_checkpoint_%s.bin" % e, "bin")
### Evaluation on validation set
print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
d.SetSplit(ecvl.SplitType.validation)
        n = 0  # running index into the validation split; spans all batches
        for b in range(num_batches_val):
print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
e + 1, args.epochs, b + 1, num_batches_val
), end="", flush=True)
d.LoadBatch(x, y)
x.div_(255.0)
eddl.forward(net, [x])
output = eddl.getOutput(out)
sum_ = 0.0
for k in range(args.batch_size):
result = output.select([str(k)])
target = y.select([str(k)])
ca = metric.value(target, result)
total_metric.append(ca)
sum_ += ca
if args.out_dir:
result_a = np.array(result, copy=False)
target_a = np.array(target, copy=False)
classe = np.argmax(result_a).item()
gt_class = np.argmax(target_a).item()
single_image = x.select([str(k)])
img_t = ecvl.TensorToView(single_image)
img_t.colortype_ = ecvl.ColorType.BGR
single_image.mult_(255.)
filename = d.samples_[d.GetSplit()[n]].location_[0]
head, tail = os.path.splitext(os.path.basename(filename))
bname = "%s_gt_class_%s.png" % (head, gt_class)
cur_path = os.path.join(
current_path, d.classes_[classe], bname
)
ecvl.ImWrite(cur_path, img_t)
n += 1
print("categorical_accuracy:", sum_ / args.batch_size)
total_avg = sum(total_metric) / len(total_metric)
print("Total categorical accuracy:", total_avg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("in_ds", metavar="INPUT_DATASET")
parser.add_argument("--epochs", type=int, metavar="INT", default=50)
parser.add_argument("--batch-size", type=int, metavar="INT", default=32)
parser.add_argument("--gpu", action="store_true")
parser.add_argument("--out-dir", metavar="DIR",
help="if set, save images in this directory")
main(parser.parse_args())
| 2.4375 | 2 |
src/view/services_update_page.py | nbilbo/services_manager | 0 | 11188 | from src.view.services_page import ServicesPage
from src.view.services_add_page import ServicesAddPage
class ServicesUpdatePage(ServicesAddPage):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.set_title("Update service")
self.set_confirm_button_text("Update")
| 1.984375 | 2 |
Lib/site-packages/wagtail/utils/l18n/translation.py | SyahmiAmin/belikilo | 0 | 11189 | <filename>Lib/site-packages/wagtail/utils/l18n/translation.py<gh_stars>0
import os
import gettext
import bisect
from locale import getdefaultlocale
from collections.abc import MutableMapping
from copy import copy, deepcopy
import six
class Trans:
def __init__(self):
self.registry = {}
self.current = None
self.set(getdefaultlocale()[0])
def __getitem__(self, language):
if language:
try:
return self.registry[language]
except KeyError:
self.registry[language] = gettext.translation(
'l18n',
os.path.join(os.path.dirname(__file__), 'locale'),
languages=[language],
fallback=True
)
return self.registry[language]
else:
return None
def set(self, language):
self.current = self[language]
def gettext(self, s):
try:
return self.current.gettext(s)
except AttributeError:
return s
if six.PY2:
def ugettext(self, s):
try:
return self.current.ugettext(s)
except AttributeError:
return s
_trans = Trans()
def set_language(language=None):
_trans.set(language)
if six.PY2:
def translate(s, utf8=True, trans=_trans):
if trans:
if utf8:
return trans.ugettext(s)
return trans.gettext(s)
else:
return s
else:
def translate(s, utf8=True, trans=_trans):
if trans:
t = trans.gettext(s)
if utf8:
return t
return t.encode()
else:
return s
class L18NLazyObject:
def _value(self, utf8=True):
raise NotImplementedError
def __str__(self):
return self._value(utf8=six.PY3)
def __bytes__(self):
return self._value(utf8=False)
def __unicode__(self):
return self._value(utf8=True)
class L18NLazyString(L18NLazyObject):
def __init__(self, s):
self._str = s
def __copy__(self):
return self.__class__(self._str)
def __deepcopy__(self, memo):
result = self.__copy__()
memo[id(self)] = result
return result
def _value(self, utf8=True):
return translate(self._str, utf8)
def __repr__(self):
return 'L18NLazyString <%s>' % repr(self._str)
def __getattr__(self, name):
# fallback to call the value's attribute in case it's not found in
# L18NLazyString
return getattr(self._value(), name)
class L18NLazyStringsList(L18NLazyObject):
def __init__(self, sep='/', *s):
# we assume that the separator and the strings have the same encoding
# (text_type)
self._sep = sep
self._strings = s
def __copy__(self):
return self.__class__(self._sep, *self._strings)
def __deepcopy__(self, memo):
result = self.__copy__()
memo[id(self)] = result
return result
def _value(self, utf8=True):
sep = self._sep
if utf8 and isinstance(sep, six.binary_type):
sep = sep.decode(encoding='utf-8')
elif not utf8 and isinstance(sep, six.text_type):
sep = sep.encode(encoding='utf-8')
return sep.join([translate(s, utf8)
for s in self._strings])
def __repr__(self):
return 'L18NLazyStringsList <%s>' % self._sep.join([
repr(s) for s in self._strings
])
def __getattr__(self, name):
# fallback to call the value's attribute in case it's not found in
# L18NLazyStringsList
return getattr(self._value(), name)
class L18NBaseMap(MutableMapping):
"""
Generic dictionary that returns lazy string or lazy string lists
"""
def __init__(self, *args, **kwargs):
self.store = dict(*args, **kwargs)
self.sorted = {}
def __copy__(self):
result = self.__class__()
result.store = self.store
result.sorted = self.sorted
return result
def __deepcopy__(self, memo):
result = self.__class__()
memo[id(self)] = result
result.store = deepcopy(self.store, memo)
result.sorted = deepcopy(self.sorted, memo)
return result
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, value):
self.store[key] = value
for locale, (keys, values) in six.iteritems(self.sorted):
tr = translate(value, trans=_trans[locale])
i = bisect.bisect_left(values, tr)
keys.insert(i, key)
values.insert(i, tr)
def __delitem__(self, key):
del self.store[key]
for keys, values in self.sorted.values():
i = keys.index(key)
del keys[i]
del values[i]
def __iter__(self):
loc = _trans.current._info['language'] if _trans.current else None
try:
return iter(self.sorted[loc][0])
except KeyError:
keys = []
values = []
# we can't use iteritems here, as we need to call __getitem__
# via self[key]
for key in iter(self.store):
value = six.text_type(self[key])
i = bisect.bisect_left(values, value)
keys.insert(i, key)
values.insert(i, value)
self.sorted[loc] = (keys, values)
return iter(keys)
def __len__(self):
return len(self.store)
def subset(self, keys):
"""
Generates a subset of the current map (e.g. to retrieve only tzs in
common_timezones from the tz_cities or tz_fullnames maps)
"""
sub = self.__class__()
self_keys = set(self.store.keys())
subset_keys = self_keys.intersection(keys)
removed_keys = self_keys.difference(subset_keys)
sub.store = {k: self.store[k] for k in subset_keys}
for loc, sorted_items in six.iteritems(self.sorted):
loc_keys = copy(self.sorted[loc][0])
loc_values = copy(self.sorted[loc][1])
for k in removed_keys:
i = loc_keys.index(k)
del loc_keys[i]
del loc_values[i]
sub.sorted[loc] = (loc_keys, loc_values)
return sub
class L18NMap(L18NBaseMap):
def __getitem__(self, key):
return L18NLazyString(self.store[key])
class L18NListMap(L18NBaseMap):
def __init__(self, sep='/', aux=None, *args, **kwargs):
self._sep = sep
self._aux = aux
super(L18NListMap, self).__init__(*args, **kwargs)
def __copy__(self):
result = super(L18NListMap, self).__copy__()
result._sep = self._sep
result._aux = self._aux
return result
def __deepcopy__(self, memo):
result = super(L18NListMap, self).__deepcopy__(memo)
result._sep = self._sep
result._aux = None if self._aux is None else deepcopy(self._aux, memo)
return result
def __getitem__(self, key):
strs = key.split(self._sep)
strs[-1] = key
lst = []
for s in strs:
try:
lst.append(self.store[s])
except KeyError:
lst.append(self._aux[s])
return L18NLazyStringsList(self._sep, *lst)
def subset(self, keys):
sub = super(L18NListMap, self).subset(keys)
sub._sep = self._sep
sub._aux = deepcopy(self._aux)
return sub
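# Usage sketch (assumes l18n gettext catalogs are installed): values resolve
# lazily against the current catalog, so switching the language re-translates
# existing objects; with no catalog the fallback returns the original strings.
def _lazy_translation_demo():
    cities = L18NMap({"europe/paris": "Paris", "europe/rome": "Rome"})
    paris = cities["europe/paris"]   # an L18NLazyString, not translated yet
    set_language("fr")
    in_french = str(paris)           # evaluated against the "fr" catalog
    set_language("en")
    in_english = str(paris)          # same object, lazily re-evaluated
    return in_french, in_english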
| 2.15625 | 2 |
playground/sockets/server.py | tunki/lang-training | 0 | 11190 | import socket
s = socket.socket()
s.bind(("localhost", 9999))
s.listen(1)
sc, addr = s.accept()
while True:
    recibido = sc.recv(1024)
    # recv() returns bytes in Python 3, so decode before comparing/printing
    if recibido.decode().strip() == "quit":
        break
    print("Received:", recibido.decode())
    sc.send(recibido)
print("bye")
sc.close()
s.close()
| 2.890625 | 3 |
examples/example_contour.py | moghimis/geojsoncontour | 63 | 11191 | <filename>examples/example_contour.py
import numpy
import matplotlib.pyplot as plt
import geojsoncontour
# Create lat and lon vectors and grid data
grid_size = 1.0
latrange = numpy.arange(-90.0, 90.0, grid_size)
lonrange = numpy.arange(-180.0, 180.0, grid_size)
X, Y = numpy.meshgrid(lonrange, latrange)
Z = numpy.sqrt(X * X + Y * Y)
n_contours = 10
levels = numpy.linspace(start=0, stop=100, num=n_contours)
# Create a contour plot plot from grid (lat, lon) data
figure = plt.figure()
ax = figure.add_subplot(111)
contour = ax.contour(lonrange, latrange, Z, levels=levels, cmap=plt.cm.jet)
# Convert matplotlib contour to geojson
geojsoncontour.contour_to_geojson(
contour=contour,
geojson_filepath='out.geojson',
min_angle_deg=10.0,
ndigits=3,
unit='m'
)
| 3.40625 | 3 |
Graphs/ConnectedComponents.py | PK-100/Competitive_Programming | 70 | 11192 | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countGroups' function below.
#
# The function is expected to return an INTEGER.
# The function accepts STRING_ARRAY related as parameter.
#
class Graph:
def __init__(self, V):
self.V = V
self.adj = [[] for i in range(V)]
def addEdge(self, a,b):
self.adj[a].append(b)
self.adj[b].append(a)
def dfs_util(self, temp, node, visited):
visited[node] = True
temp.append(node)
for i in self.adj[node]:
if not visited[i]:
temp = self.dfs_util(temp, i, visited)
return temp
def countGroups(self):
"""
This is the classical concept of connected components in a Graph
"""
visited = [False] * self.V
groups = []
for node in range(self.V):
if not visited[node]:
temp = []
groups.append(self.dfs_util(temp, node, visited))
return groups
def convertMatrixToGraph(mat):
"""
Accept the input which is an adjacency matrix and return a Graph, which is an adjacency list
"""
n = len(mat)
g = Graph(n)
for i in range(n):
for j in range(n):
if j > i and mat[i][j] == '1':
g.addEdge(i,j)
return g
def countGroups(related):
# Write your code here
graph = convertMatrixToGraph(related)
groups = graph.countGroups()
# print(groups)
return len(groups)
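# Small worked example (not part of the HackerRank harness): persons 0 and 1
# are related, person 2 is alone, so there are 2 connected components.
def _count_groups_example():
    related = ["110",
               "110",
               "001"]
    assert countGroups(related) == 2
    return countGroups(related)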
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
related_count = int(input().strip())
related = []
for _ in range(related_count):
related_item = input()
related.append(related_item)
result = countGroups(related)
fptr.write(str(result) + '\n')
fptr.close()
| 4.03125 | 4 |
recognition/ml_model.py | hurschler/pig-face-recognition | 1 | 11193 | <reponame>hurschler/pig-face-recognition
import logging.config
import util.logger_init
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from util.tensorboard_util import plot_confusion_matrix, plot_to_image
from tensorflow.python.keras.callbacks_v1 import TensorBoard
from keras import backend as K
class MlModel:
def get_model(self):
return self.model
def summary_print(self):
self.model.summary()
# Define your scheduling function
def scheduler(self, epoch):
return 0.001 * 0.95 ** epoch
def log_confusion_matrix(self, epoch, logs):
# Use the model to predict the values from the test_images.
test_pred_raw = self.model.predict(self.ml_data.x_test)
test_pred = np.argmax(test_pred_raw, axis=1)
# Calculate the confusion matrix using sklearn.metrics
cm = confusion_matrix(self.ml_data.y_test, test_pred)
figure = plot_confusion_matrix(cm, class_names=self.ml_data.pig_dict.values())
cm_image = plot_to_image(figure)
# Log the confusion matrix as an image summary.
with self.file_writer_cm.as_default():
tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# Define TensorBoard callback child class
class LRTensorBoard(TensorBoard):
def __init__(self, log_dir, **kwargs): # add other arguments to __init__ if you need
super().__init__(log_dir=log_dir, **kwargs)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs.update({'lr': K.eval(self.model.optimizer.lr)})
super().on_epoch_end(epoch, logs)
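# Illustrative wiring of the callbacks defined above into a Keras fit() call.
# `ml_model` is assumed to be a subclass instance that provides self.model,
# self.ml_data and self.file_writer_cm; this sketch is not part of the repo.
def fit_with_logging(ml_model, x_train, y_train, epochs=10):
    lr_cb = tf.keras.callbacks.LearningRateScheduler(ml_model.scheduler)
    cm_cb = tf.keras.callbacks.LambdaCallback(
        on_epoch_end=ml_model.log_confusion_matrix)
    return ml_model.get_model().fit(
        x_train, y_train, epochs=epochs, callbacks=[lr_cb, cm_cb])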
| 2.734375 | 3 |
requests/UpdateSubscriptionRequest.py | divinorum-webb/python-tableau-api | 1 | 11194 | <gh_stars>1-10
from .BaseRequest import BaseRequest
class UpdateSubscriptionRequest(BaseRequest):
"""
Update subscription request for generating API request URLs to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param new_subscription_subject: (Optional) A new subject for the subscription.
:type new_subscription_subject: string
:param new_schedule_id: (Optional) The ID of a schedule to associate this subscription with.
:type new_schedule_id: string
"""
def __init__(self,
ts_connection,
new_subscription_subject=None,
new_schedule_id=None):
super().__init__(ts_connection)
self._new_subscription_subject = new_subscription_subject
self._new_schedule_id = new_schedule_id
@property
def base_update_subscription_request(self):
if self._new_subscription_subject and self._new_schedule_id:
self._request_body.update({
'subscription': {
'subject': self._new_subscription_subject,
'schedule': {'id': self._new_schedule_id}
}
})
elif self._new_subscription_subject and not self._new_schedule_id:
self._request_body.update({
'subscription': {
'subject': self._new_subscription_subject
}
})
else:
self._request_body.update({
'subscription': {
'schedule': {'id': self._new_schedule_id}
}
})
return self._request_body
def get_request(self):
return self.base_update_subscription_request
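# Usage sketch (not part of the original module): builds the request body that
# would be sent to Tableau Server. `connection` is a placeholder for a valid
# Tableau Server connection object required by BaseRequest.
def build_update_subscription_body(connection, subject=None, schedule_id=None):
    request = UpdateSubscriptionRequest(
        ts_connection=connection,
        new_subscription_subject=subject,
        new_schedule_id=schedule_id)
    # e.g. subject='Weekly digest', schedule_id='sched-123' yields
    # {'subscription': {'subject': 'Weekly digest', 'schedule': {'id': 'sched-123'}}}
    return request.get_request()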
| 2.640625 | 3 |
doc/examples/cython/cython_main.py | hershg/ray | 2 | 11195 | <filename>doc/examples/cython/cython_main.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ray
import click
import inspect
import numpy as np
import cython_examples as cyth
def run_func(func, *args, **kwargs):
"""Helper function for running examples"""
ray.init()
func = ray.remote(func)
# NOTE: kwargs not allowed for now
result = ray.get(func.remote(*args))
# Inspect the stack to get calling example
caller = inspect.stack()[1][3]
print("%s: %s" % (caller, str(result)))
return result
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
"""Working with Cython actors and functions in Ray"""
@cli.command()
def example1():
"""Cython def function"""
run_func(cyth.simple_func, 1, 2, 3)
@cli.command()
def example2():
"""Cython def function, recursive"""
run_func(cyth.fib, 10)
@cli.command()
def example3():
"""Cython def function, built-in typed parameter"""
# NOTE: Cython will attempt to cast argument to correct type
# NOTE: Floats will be cast to int, but string, for example will error
run_func(cyth.fib_int, 10)
@cli.command()
def example4():
"""Cython cpdef function"""
run_func(cyth.fib_cpdef, 10)
@cli.command()
def example5():
"""Cython wrapped cdef function"""
# NOTE: cdef functions are not exposed to Python
run_func(cyth.fib_cdef, 10)
@cli.command()
def example6():
"""Cython simple class"""
ray.init()
cls = ray.remote(cyth.simple_class)
a1 = cls.remote()
a2 = cls.remote()
result1 = ray.get(a1.increment.remote())
result2 = ray.get(a2.increment.remote())
print(result1, result2)
@cli.command()
def example7():
"""Cython with function from BrainIAK (masked log)"""
run_func(cyth.masked_log, np.array([-1.0, 0.0, 1.0, 2.0]))
@cli.command()
def example8():
"""Cython with blas. NOTE: requires scipy"""
# See cython_blas.pyx for argument documentation
mat = np.array(
[[[2.0, 2.0], [2.0, 2.0]], [[2.0, 2.0], [2.0, 2.0]]], dtype=np.float32)
result = np.zeros((2, 2), np.float32, order="C")
run_func(cyth.compute_kernel_matrix, "L", "T", 2, 2, 1.0, mat, 0, 2, 1.0,
result, 2)
if __name__ == "__main__":
cli()
| 2.75 | 3 |
Simon/dev/main_age_classification.py | uncharted-distil/simon | 0 | 11196 | <gh_stars>0
from DataGenerator import *
from Encoder import *
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Activation, Flatten, Input, Dropout, MaxPooling1D, Convolution1D
from keras.layers import LSTM, Lambda, merge, Masking
from keras.layers import Embedding, TimeDistributed
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.utils import np_utils
import numpy as np
import tensorflow as tf
import re
from keras import backend as K
import keras.callbacks
import sys
import os
import time
import matplotlib.pyplot as plt
import pickle
def binarize(x, sz=71):
return tf.to_float(tf.one_hot(x, sz, on_value=1, off_value=0, axis=-1))
def custom_multi_label_accuracy(y_true, y_pred):
# need some threshold-specific rounding code here, presently only for 0.5 thresh.
return K.mean(K.round(np.multiply(y_true,y_pred)),axis=0)
def eval_binary_accuracy(y_test, y_pred):
correct_indices = y_test==y_pred
all_correct_predictions = np.zeros(y_test.shape)
all_correct_predictions[correct_indices] = 1
#print("DEBUG::binary accuracy matrix")
#print(all_correct_predictions)
return np.mean(all_correct_predictions),np.mean(all_correct_predictions, axis=0),all_correct_predictions
def eval_confusion(y_test, y_pred):
wrong_indices = y_test!=y_pred
all_wrong_predictions = np.zeros(y_test.shape)
all_wrong_predictions[wrong_indices] = 1
#print("DEBUG::confusion matrix")
#print(all_wrong_predictions)
return np.mean(all_wrong_predictions),np.mean(all_wrong_predictions, axis=0),all_wrong_predictions
def eval_false_positives(y_test, y_pred):
false_positive_matrix = np.zeros((y_test.shape[1],y_test.shape[1]))
false_positives = np.multiply(y_pred,1-y_test)
# print(precision_matrix)
for i in np.arange(y_test.shape[0]):
for j in np.arange(y_test.shape[1]) :
if(false_positives[i,j]==1): #positive label for ith sample and jth predicted category
for k in np.arange(y_test.shape[1]):
if(y_test[i,k]==1): #positive label for ith sample and kth true category
# print("DEBUG::i,j,k")
# print("%d,%d,%d"%(i,j,k))
false_positive_matrix[j,k] +=1
# print("DEBUG::precision matrix")
# print(precision_matrix)
return np.sum(false_positive_matrix),np.sum(false_positive_matrix, axis=0),false_positive_matrix
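# Tiny worked example for the evaluation helpers above (illustrative only):
# with 2 samples and 3 categories, two wrong entries out of six give a binary
# accuracy of 4/6 and a confusion rate of 2/6.
def _eval_helpers_example():
    y_true = np.array([[1, 0, 0],
                       [0, 1, 0]])
    y_pred = np.array([[1, 0, 0],
                       [0, 0, 1]])
    acc, _, _ = eval_binary_accuracy(y_true, y_pred)
    conf, _, _ = eval_confusion(y_true, y_pred)
    assert abs(acc - 4.0 / 6.0) < 1e-9 and abs(conf - 2.0 / 6.0) < 1e-9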
def binarize_outshape(in_shape):
return in_shape[0], in_shape[1], 71
def max_1d(x):
return K.max(x, axis=1)
def striphtml(html):
p = re.compile(r'<.*?>')
return p.sub('', html)
def clean(s):
return re.sub(r'[^\x00-\x7f]', r'', s)
def setup_test_sets(X, y):
ids = np.arange(len(X))
np.random.shuffle(ids)
# shuffle
X = X[ids]
y = y[ids]
train_end = int(X.shape[0] * .6)
cross_validation_end = int(X.shape[0] * .3 + train_end)
test_end = int(X.shape[0] * .1 + cross_validation_end)
X_train = X[:train_end]
X_cv_test = X[train_end:cross_validation_end]
X_test = X[cross_validation_end:test_end]
y_train = y[:train_end]
y_cv_test = y[train_end:cross_validation_end]
y_test = y[cross_validation_end:test_end]
data = type('data_type', (object,), {'X_train' : X_train, 'X_cv_test': X_cv_test, 'X_test': X_test, 'y_train': y_train, 'y_cv_test': y_cv_test, 'y_test':y_test})
return data
def generate_model(max_len, max_cells, category_count):
filter_length = [1, 3, 3]
nb_filter = [40, 200, 1000]
pool_length = 2
# document input
document = Input(shape=(max_cells, max_len), dtype='int64')
# sentence input
in_sentence = Input(shape=(max_len,), dtype='int64')
# char indices to one hot matrix, 1D sequence to 2D
embedded = Lambda(binarize, output_shape=binarize_outshape)(in_sentence)
# embedded: encodes sentence
for i in range(len(nb_filter)):
embedded = Convolution1D(nb_filter=nb_filter[i],
filter_length=filter_length[i],
border_mode='valid',
activation='relu',
init='glorot_normal',
subsample_length=1)(embedded)
embedded = Dropout(0.1)(embedded)
embedded = MaxPooling1D(pool_length=pool_length)(embedded)
forward_sent = LSTM(256, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu')(embedded)
backward_sent = LSTM(256, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu', go_backwards=True)(embedded)
sent_encode = merge([forward_sent, backward_sent],
mode='concat', concat_axis=-1)
sent_encode = Dropout(0.3)(sent_encode)
# sentence encoder
encoder = Model(input=in_sentence, output=sent_encode)
print(encoder.summary())
encoded = TimeDistributed(encoder)(document)
# encoded: sentences to bi-lstm for document encoding
forwards = LSTM(128, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu')(encoded)
backwards = LSTM(128, return_sequences=False, dropout_W=0.2,
dropout_U=0.2, consume_less='gpu', go_backwards=True)(encoded)
merged = merge([forwards, backwards], mode='concat', concat_axis=-1)
output = Dropout(0.3)(merged)
output = Dense(128, activation='relu')(output)
output = Dropout(0.3)(output)
output = Dense(category_count, activation='softmax')(output)
# output = Activation('softmax')(output)
model = Model(input=document, output=output)
return model
# record history of training
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.accuracies = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.accuracies.append(logs.get('binary_accuracy'))
def plot_loss(history):
# summarize history for accuracy
plt.subplot('121')
plt.plot(history.history['binary_accuracy'])
plt.plot(history.history['val_binary_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot('122')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
def train_model(batch_size, checkpoint_dir, model, nb_epoch, data):
print("starting learning")
check_cb = keras.callbacks.ModelCheckpoint(checkpoint_dir + "text-class" + '.{epoch:02d}-{val_loss:.2f}.hdf5',
monitor='val_loss', verbose=0, save_best_only=True, mode='min')
earlystop_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=7, verbose=1, mode='auto')
tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0,
embeddings_layer_names=None, embeddings_metadata=None)
loss_history = LossHistory()
history = model.fit(data.X_train, data.y_train, validation_data=(data.X_cv_test, data.y_cv_test), batch_size=batch_size,
nb_epoch=nb_epoch, shuffle=True, callbacks=[earlystop_cb, check_cb, loss_history, tbCallBack])
print('losses: ')
print(history.history['loss'])
print('accuracies: ')
# print(history.history['acc'])
print(history.history['val_binary_accuracy'])
plot_loss(history)
def evaluate_model(max_cells, model, data, encoder, p_threshold):
print("Starting predictions:")
start = time.time()
scores = model.evaluate(data.X_test, data.y_test, verbose=0)
end = time.time()
print("Accuracy: %.2f%% \n Time: {0}s \n Time/example : {1}s/ex".format(
end - start, (end - start) / data.X_test.shape[0]) % (scores[1] * 100))
# return all predictions above a certain threshold
# first, the maximum probability/class
probabilities = model.predict(data.X_test, verbose=1)
# print("The prediction probabilities are:")
# print(probabilities)
m = np.amax(probabilities, axis=1)
max_index = np.argmax(probabilities, axis=1)
# print("Associated fixed category indices:")
# print(max_index)
with open('Categories.txt','r') as f:
Categories = f.read().splitlines()
print("Remember that the fixed categories are:")
print(Categories)
print("Most Likely Predicted Category/Labels are: ")
print((np.array(Categories))[max_index])
print("Associated max probabilities/confidences:")
print(m)
# next, all probabilities above a certain threshold
print("DEBUG::y_test:")
print(data.y_test)
prediction_indices = probabilities > p_threshold
y_pred = np.zeros(data.y_test.shape)
y_pred[prediction_indices] = 1
print("DEBUG::y_pred:")
print(y_pred)
print("'Binary' accuracy (true positives + true negatives) is:")
print(eval_binary_accuracy(data.y_test,y_pred))
print("'Binary' confusion (false positives + false negatives) is:")
print(eval_confusion(data.y_test,y_pred))
print("False positive matrix is:")
print(eval_false_positives(data.y_test,y_pred))
def main(checkpoint, data_count, data_cols, should_train, nb_epoch, null_pct, try_reuse_data, batch_size, execution_config):
maxlen = 280
max_cells = 1
p_threshold = 0.5
checkpoint_dir = "checkpoints/age_classification/"
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
# read test data
dataset_name = "training_age"
print("Beginning Age Classifier Training...")
raw_data = pd.read_csv('data/age_classification/'+dataset_name+'.csv',
dtype='str', header=0, skip_blank_lines=True)
#print(raw_data)
print(raw_data.shape)
raw_data_low = raw_data.ix[raw_data['label']=='14_17',0]
raw_data_medium = raw_data.ix[raw_data['label']=='18_23',0]
raw_data_high = raw_data.ix[raw_data['label']=='24_plus',0]
print(raw_data_low.shape)
print(raw_data_medium.shape)
print(raw_data_high.shape)
raw_data = np.asarray(raw_data_low)[np.newaxis]
print(raw_data)
print(raw_data.shape)
header = [['14_17'],]*raw_data_low.shape[0]
raw_data = np.column_stack((raw_data,np.asarray(raw_data_medium)[np.newaxis]))
header.extend([['18_23'],]*raw_data_medium.shape[0])
raw_data = np.column_stack((raw_data,np.asarray(raw_data_high)[np.newaxis]))
header.extend([['24_plus'],]*raw_data_high.shape[0])
print("Final raw_data size is:")
print(raw_data.shape)
print("Corresponding header length is:")
print(len(header))
#print(header)
# transpose the data
raw_data = np.char.lower(np.transpose(raw_data).astype('U'))
# do other processing and encode the data
config = {}
if not should_train:
if execution_config is None:
raise TypeError
config = load_config(execution_config, checkpoint_dir)
encoder = config['encoder']
if checkpoint is None:
checkpoint = config['checkpoint']
else:
encoder = Encoder()
encoder.process(raw_data, max_cells)
# encode the data
X, y = encoder.encode_data(raw_data, header, maxlen)
max_cells = encoder.cur_max_cells
data = None
if should_train:
data = setup_test_sets(X, y)
else:
data = type('data_type', (object,), {'X_test': X, 'y_test':y})
print('Sample chars in X:{}'.format(X[2, 0:10]))
print('y:{}'.format(y[2]))
# need to know number of fixed categories to create model
category_count = y.shape[1]
print('Number of fixed categories is :')
print(category_count)
model = generate_model(maxlen, max_cells, category_count)
load_weights(checkpoint, config, model, checkpoint_dir)
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['binary_accuracy'])
if(should_train):
start = time.time()
train_model(batch_size, checkpoint_dir, model, nb_epoch, data)
end = time.time()
print("Time for training is %f sec"%(end-start))
config = { 'encoder' : encoder,
'checkpoint' : get_best_checkpoint(checkpoint_dir) }
save_config(config, checkpoint_dir)
#print("DEBUG::The actual headers are:")
#print(header)
evaluate_model(max_cells, model, data, encoder, p_threshold)
# Now, label unlabeled tweets
dataset_name = "jordans_collection2"
print("Beginning Age Classifier Labeling...")
raw_data = pd.read_csv('data/age_classification/'+dataset_name+'.csv',
dtype='str', header=0, skip_blank_lines=True,lineterminator='\n')
raw_data = raw_data.ix[:,2]
raw_data = np.asarray(raw_data)[np.newaxis]
print(raw_data.shape)
raw_data = np.char.lower(np.transpose(raw_data).astype('U'))
X, y = encoder.encode_data(raw_data, [['14_17'],]*raw_data.shape[0], maxlen)
data = type('data_type', (object,), {'X_test': X, 'y_test':y})
probabilities = model.predict(data.X_test, verbose=1)
print(encoder.reverse_label_encode(probabilities,p_threshold))
def resolve_file_path(filename, dir):
if os.path.isfile(str(filename)):
return str(filename)
elif os.path.isfile(str(dir + str(filename))):
return dir + str(filename)
def load_weights(checkpoint_name, config, model, checkpoint_dir):
if config and not checkpoint_name:
checkpoint_name = config['checkpoint']
if checkpoint_name:
checkpoint_path = resolve_file_path(checkpoint_name, checkpoint_dir)
print("Checkpoint : %s" % str(checkpoint_path))
model.load_weights(checkpoint_path)
def save_config(execution_config, checkpoint_dir):
filename = ""
if not execution_config["checkpoint"] is None:
filename = execution_config["checkpoint"].rsplit( ".", 1 )[ 0 ] + ".pkl"
else:
filename = time.strftime("%Y%m%d-%H%M%S") + ".pkl"
with open(checkpoint_dir + filename, 'wb') as output:
pickle.dump(execution_config, output, pickle.HIGHEST_PROTOCOL)
def load_config(execution_config_path, dir):
execution_config_path = resolve_file_path(execution_config_path, dir)
return pickle.load( open( execution_config_path, "rb" ) )
def get_best_checkpoint(checkpoint_dir):
max_mtime = 0
max_file = ''
for dirname,subdirs,files in os.walk(checkpoint_dir):
for fname in files:
full_path = os.path.join(dirname, fname)
mtime = os.stat(full_path).st_mtime
if mtime > max_mtime:
max_mtime = mtime
max_dir = dirname
max_file = fname
return max_file
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='attempts to discern data types looking at columns holistically.')
parser.add_argument('--cp', dest='checkpoint',
help='checkpoint to load')
parser.add_argument('--config', dest='execution_config',
help='execution configuration to load. contains max_cells, and encoder config.')
parser.add_argument('--train', dest='should_train', action="store_true",
default="True", help='run training')
parser.add_argument('--no_train', dest='should_train', action="store_false",
default="True", help='do not run training')
parser.set_defaults(should_train=True)
parser.add_argument('--data_count', dest='data_count', action="store", type=int,
default=100, help='number of data rows to create')
parser.add_argument('--data_cols', dest='data_cols', action="store", type=int,
default=10, help='number of data cols to create')
parser.add_argument('--nullpct', dest='null_pct', action="store", type=float,
default=0, help='percent of Nulls to put in each column')
parser.add_argument('--nb_epoch', dest='nb_epoch', action="store", type=int,
default=5, help='number of epochs')
parser.add_argument('--try_reuse_data', dest='try_reuse_data', action="store_true",
default="True", help='loads existing data if the dimensions have been stored')
parser.add_argument('--force_new_data', dest='try_reuse_data', action="store_false",
default="True", help='forces the creation of new data, even if the dimensions have been stored')
parser.add_argument('--batch_size', dest='batch_size', action="store", type=int,
default=64, help='batch size for training')
args = parser.parse_args()
main(args.checkpoint, args.data_count, args.data_cols, args.should_train,
args.nb_epoch, args.null_pct, args.try_reuse_data, args.batch_size, args.execution_config)
| 2.46875 | 2 |
desdeo_tools/solver/__init__.py | phoopies/desdeo-tools | 1 | 11197 | """This module implements methods for solving scalar valued functions.
"""
__all__ = ["DiscreteMinimizer", "ScalarMethod", "ScalarMinimizer", "ScalarSolverException"]
from desdeo_tools.solver.ScalarSolver import DiscreteMinimizer, ScalarMethod, ScalarMinimizer, ScalarSolverException
| 1.570313 | 2 |
tests/test_root_to_hdf.py | lundeenj/hawc_hal | 0 | 11198 | from hawc_hal.maptree.map_tree import map_tree_factory
from hawc_hal.response import hawc_response_factory
import os
from conftest import check_map_trees, check_responses
def test_root_to_hdf_response(response):
r = hawc_response_factory(response)
test_filename = "response.hd5"
# Make sure it doesn't exist yet, if it does,remove it
if os.path.exists(test_filename):
os.remove(test_filename)
r.write(test_filename)
# Try to open and use it
r2 = hawc_response_factory(test_filename)
check_responses(r, r2)
os.remove(test_filename)
def do_one_test_maptree(geminga_roi,
geminga_maptree,
fullsky=False):
# Test both with a defined ROI and full sky (ROI is None)
if fullsky:
roi_ = None
else:
roi_ = geminga_roi
m = map_tree_factory(geminga_maptree, roi_)
test_filename = "maptree.hd5"
# Make sure it doesn't exist yet, if it does,remove it
if os.path.exists(test_filename):
os.remove(test_filename)
m.write(test_filename)
# Try to open and use it
m2 = map_tree_factory(test_filename, roi_)
check_map_trees(m, m2)
os.remove(test_filename)
def test_root_to_hdf_maptree_roi(geminga_roi,
geminga_maptree):
do_one_test_maptree(geminga_roi,
geminga_maptree,
fullsky=False)
def test_root_to_hdf_maptree_full_sky(geminga_roi,
geminga_maptree):
do_one_test_maptree(geminga_roi,
geminga_maptree,
fullsky=True)
| 2.3125 | 2 |
clickhouse_driver/compression/zstd.py | risicle/clickhouse-driver | 17 | 11199 | from __future__ import absolute_import
from io import BytesIO
import zstd
from .base import BaseCompressor, BaseDecompressor
from ..protocol import CompressionMethod, CompressionMethodByte
from ..reader import read_binary_uint32
from ..writer import write_binary_uint32, write_binary_uint8
class Compressor(BaseCompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def get_compressed_data(self, extra_header_size):
rv = BytesIO()
data = self.get_value()
compressed = zstd.compress(data)
header_size = extra_header_size + 4 + 4 # sizes
write_binary_uint32(header_size + len(compressed), rv)
write_binary_uint32(len(data), rv)
rv.write(compressed)
return rv.getvalue()
class Decompressor(BaseDecompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def get_decompressed_data(self, method_byte, compressed_hash,
extra_header_size):
size_with_header = read_binary_uint32(self.stream)
compressed_size = size_with_header - extra_header_size - 4
compressed = BytesIO(self.stream.read(compressed_size))
block_check = BytesIO()
write_binary_uint8(method_byte, block_check)
write_binary_uint32(size_with_header, block_check)
block_check.write(compressed.getvalue())
self.check_hash(block_check.getvalue(), compressed_hash)
compressed = compressed.read(compressed_size - 4)
return zstd.decompress(compressed)
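# The zstd codec itself is symmetric; only the ClickHouse block framing
# (method byte, sizes, checksum) handled above differs between the two classes.
# Minimal round-trip sketch of the raw codec used here:
def _zstd_roundtrip_example(payload=b'clickhouse block payload'):
    compressed = zstd.compress(payload)
    assert zstd.decompress(compressed) == payload
    return len(payload), len(compressed)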
| 2.484375 | 2 |