max_stars_repo_path: stringlengths 4–245
max_stars_repo_name: stringlengths 7–115
max_stars_count: int64 101–368k
id: stringlengths 2–8
content: stringlengths 6–1.03M
max_stars_repo_path: scripts/test_tutorials.py
max_stars_repo_name: quantummind/quantum
max_stars_count: 1,501
id: 132454
content:
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to ensure all notebooks execute without error by pytesting them."""
import glob
import re

from absl.testing import parameterized
import nbformat
import nbconvert
import tensorflow as tf

# Must be run from the directory containing `quantum` repo.
NOTEBOOKS = glob.glob("quantum/docs/tutorials/*.ipynb")


class ExamplesTest(tf.test.TestCase, parameterized.TestCase):

    @parameterized.parameters(NOTEBOOKS)
    def test_notebook(self, path):
        """Test that notebooks open/run correctly."""
        nb = nbformat.read(path, as_version=4)
        # Scrub any magic from the notebook before running.
        for cell in nb.get("cells"):
            if cell['cell_type'] == 'code':
                src = cell['source']
                # Comment out lines containing '!' but not '!='
                src = re.sub(r'\!(?!=)', r'#!', src)
                # For mnist.ipynb to reduce runtime in test.
                src = re.sub('NUM_EXAMPLES ?= ?.*', 'NUM_EXAMPLES = 10', src)
                # For quantum_reinforcement_learning.ipynb to reduce runtime in test.
                src = re.sub('n_episodes ?= ?.*', 'n_episodes = 50', src)
                # For noise.ipynb to reduce runtime in test.
                src = re.sub('n_epochs ?= ?.*', 'n_epochs = 2', src)
                cell['source'] = src
        _ = nbconvert.preprocessors.execute.executenb(nb,
                                                      timeout=900,
                                                      kernel_name="python3")


if __name__ == "__main__":
    tf.test.main()
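The magic-scrubbing step above hinges on a single negative-lookahead regex. A minimal, self-contained sketch of its behavior (the src sample is made up for illustration):

import re

# Shell magics like '!pip ...' get commented out; the '!=' operator survives
# because the lookahead (?!=) refuses to match a '!' followed by '='.
src = '!pip install tensorflow-quantum\nassert 1 != 2'
print(re.sub(r'\!(?!=)', r'#!', src))
# -> #!pip install tensorflow-quantum
#    assert 1 != 2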
max_stars_repo_path: src/backend/utils/isp.py
max_stars_repo_name: ddddhm1/LuWu
max_stars_count: 658
id: 132458
content:
<reponame>ddddhm1/LuWu
import json
import logging
from abc import ABC
from abc import abstractmethod
from datetime import datetime
from typing import List

import digitalocean
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.request.v20140526 import AuthorizeSecurityGroupEgressRequest
from aliyunsdkecs.request.v20140526 import AuthorizeSecurityGroupRequest
from aliyunsdkecs.request.v20140526 import CreateSecurityGroupRequest
from aliyunsdkecs.request.v20140526 import DeleteInstanceRequest
from aliyunsdkecs.request.v20140526 import DeleteKeyPairsRequest
from aliyunsdkecs.request.v20140526 import DescribeAvailableResourceRequest
from aliyunsdkecs.request.v20140526 import DescribeImagesRequest
from aliyunsdkecs.request.v20140526 import \
    DescribeImageSupportInstanceTypesRequest
from aliyunsdkecs.request.v20140526 import DescribeInstanceTypesRequest
from aliyunsdkecs.request.v20140526 import DescribeKeyPairsRequest
from aliyunsdkecs.request.v20140526 import DescribeRegionsRequest
from aliyunsdkecs.request.v20140526 import DescribeSecurityGroupsRequest
from aliyunsdkecs.request.v20140526 import ImportKeyPairRequest
from aliyunsdkecs.request.v20140526 import RebootInstanceRequest
from aliyunsdkecs.request.v20140526 import StartInstanceRequest
from aliyunsdkecs.request.v20140526 import StopInstanceRequest
from namesilo.core import NameSilo
from tencentcloud.common import credential as tc_credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import \
    TencentCloudSDKException
from tencentcloud.cvm.v20170312 import cvm_client
from tencentcloud.cvm.v20170312 import models as tc_models
from vultr import Vultr
from vultr import VultrError

from schemas import vps as vps_schema
from utils.terraform import Terraform
from utils.tools import gen_ssh_key_fingerprint


class BaseIsp(ABC):
    def __init__(self, api_token, *args, **kwargs):
        self.api_token = api_token

        # set isp obj
        self.isp = self.get_isp_obj(*args, **kwargs)

    @abstractmethod
    def get_isp_obj(self, *args, **kwargs):
        pass

    @property
    @abstractmethod
    def api_url(self):
        pass


class BaseDomainIsp(BaseIsp):
    def __init__(self, api_token, *args, **kwargs):
        super().__init__(api_token, *args, **kwargs)

    @abstractmethod
    def get_isp_obj(self, *args, **kwargs):
        pass

    @abstractmethod
    def get_domain_info(self, domain_name):
        pass

    @abstractmethod
    def check_domain(self, domain_name):
        pass

    @abstractmethod
    def list_dns_records(self, domain_name):
        pass

    @abstractmethod
    def register_domain(
        self, domain_name: str, years=1, auto_renew=0, private=0
    ) -> bool:
        pass

    @abstractmethod
    def set_dns_a_record(self, domain_name: str, ip: str) -> bool:
        pass


class NameSiloIsp(BaseDomainIsp):
    def get_isp_obj(self, *args, **kwargs):
        return NameSilo(self.api_token, sandbox=kwargs.get("is_test"))

    def get_domain_info(self, domain_name):
        return self.isp.get_domain_info(domain_name)

    def check_domain(self, domain_name):
        return self.isp.check_domain(domain_name)

    def check_domain_raw(self, domain_name):
        url_extend = (
            f"checkRegisterAvailability?version=1&type=xml&"
            f"key={self.api_token}&domains={domain_name}"
        )
        parsed_content = self.isp._process_data(url_extend)
        return parsed_content.get("namesilo")

    def get_prices(self):
        return self.isp.get_prices()

    def list_dns_records(self, domain_name):
        """
        List dns records for specified domain name

        :param domain_name:
        :return: current domain's dns record
        :rtype: List
        """
        url_extend = "dnsListRecords?version=1&type=xml&key=%s&domain=%s" % (
            self.api_token,
            domain_name,
        )
        parsed_context = self.isp._process_data(url_extend)
        resource_record = parsed_context["namesilo"]["reply"]["resource_record"]

        result = []
        if isinstance(resource_record, list):
            result.extend(resource_record)
        else:
            result.append(resource_record)
        return result

    def register_domain(
        self, domain_name: str, years=1, auto_renew=0, private=1
    ) -> bool:
        return self.isp.register_domain(domain_name, years, auto_renew, private)

    def list_domain_dns_records(self, domain_name: str):
        url_extend = "dnsListRecords?version=1&type=xml&key=%s&domain=%s&rrtype=A" % (
            self.api_token,
            domain_name,
        )
        parsed_context = self.isp._process_data(url_extend)
        return parsed_context

    def set_dns_a_record(self, domain_name: str, ip: str) -> bool:
        url_extend = (
            "dnsAddRecord?version=1&type=xml&key=%s&domain=%s&rrtype=A&rrvalue=%s"
            % (self.api_token, domain_name, ip)
        )
        parsed_context = self.isp._process_data(url_extend)
        detail = parsed_context["namesilo"]["reply"].get("detail")
        return detail == "success"

    @property
    def api_url(self):
        return self.isp._base_url


class BaseVpsIsp(BaseIsp):
    _DEFAULT_SECURITY_GROUP_NAME = "default_sec_group"

    @abstractmethod
    def get_isp_obj(self, *args, **kwargs):
        pass

    @abstractmethod
    def is_valid_account(self) -> bool:
        pass

    @abstractmethod
    def create_ssh_key(self, name: str, public_key_content: str) -> str:
        pass

    @abstractmethod
    def get_available_os_list(self):
        pass

    @abstractmethod
    def get_available_plans_list(self):
        pass

    @abstractmethod
    def get_available_regions_list(self):
        pass

    @abstractmethod
    def get_ssh_key_list(self, vps_isp_id: int) -> List[dict]:
        pass

    @abstractmethod
    def destroy_ssh_key(self, ssh_key_id):
        pass

    @abstractmethod
    def create_server(
        self, vps_profile: dict, public_key: str = None, *args, **kwargs
    ) -> dict:
        tf = Terraform()
        config = tf.gen_ali_config(vps_profile, self.api_token, public_key)
        state_data = tf.run_terraform_apply(config)
        return state_data

    @abstractmethod
    def start_server(self, server_id, *args, **kwargs) -> bool:
        pass

    @abstractmethod
    def reboot_server(self, server_id, *args, **kwargs) -> bool:
        pass

    @abstractmethod
    def shutdown_server(self, server_id, *args, **kwargs) -> bool:
        pass

    @abstractmethod
    def reinstall_server(self, server_id, *args, **kwargs) -> bool:
        pass

    @abstractmethod
    def destroy_server(self, server_id, *args, **kwargs) -> bool:
        pass


class VultrIsp(BaseVpsIsp):
    def get_isp_obj(self, **kwargs: dict):
        return Vultr(self.api_token)

    def is_valid_account(self):
        valid = True
        try:
            self.isp.account.info()
        except Exception:
            valid = False
        finally:
            return valid

    def create_ssh_key(self, name: str, public_key_content: str) -> str:
        isp_ssh_key_data = self.isp.sshkey.create(name, public_key_content)
        return isp_ssh_key_data["SSHKEYID"]

    def get_available_os_list(self):
        os_list = self.isp.os.list()
        os_obj_list = [
            vps_schema.VpsSpecOsSchema(
                os_code=os_data["OSID"],
                name=os_data["name"],
            ).dict()
            for os_data in os_list.values()
        ]
        return os_obj_list

    def get_available_plans_list(self):
        plan_query_params = {"type": "vc2"}
        plans_list = self.isp.plans.list(plan_query_params)
        plan_obj_list = [
            vps_schema.VpsSpecPlanSchema(
                plan_code=plan_data["VPSPLANID"],
                name=plan_data["name"],
                vcpu=plan_data["vcpu_count"],
                ram=plan_data["ram"],
                disk=plan_data["disk"],
                bandwidth=plan_data["bandwidth"],
                price_monthly=plan_data["price_per_month"],
                region_codes=plan_data["available_locations"],
            ).dict()
            for plan_data in plans_list.values()
        ]
        return plan_obj_list

    def get_available_regions_list(self):
        region_query_params = {"availability": "yes"}
        regions_list = self.isp.regions.list(region_query_params)
        region_obj_list = [
            vps_schema.VpsSpecRegionSchema(
                region_code=region_data["DCID"],
                name=f"{region_data['name']}({region_data['country']}, {region_data['continent']})",
                features=[
                    region_metric_key
                    for region_metric_key, region_metric_value in region_data.items()
                    if isinstance(region_metric_value, bool)
                ],
            ).dict()
            for region_data in regions_list.values()
        ]
        return region_obj_list

    def get_ssh_key_list(self, vps_isp_id: int):
        ssh_key_list = self.isp.sshkey.list()
        ssh_key_obj_list = [
            dict(
                ssh_key_id=key,
                date_created=ssh_key_data["date_created"],
                name=ssh_key_data["name"],
                public_key=ssh_key_data["ssh_key"],
                isp_id=vps_isp_id,
            )
            for key, ssh_key_data in ssh_key_list.items()
        ]
        return ssh_key_obj_list

    def destroy_ssh_key(self, ssh_key_id):
        return self.isp.sshkey.destroy(ssh_key_id)

    def create_server(
        self, vps_profile: dict, public_key: str = None, *args, **kwargs
    ) -> dict:
        tf = Terraform()
        config = tf.gen_vultr_config(vps_profile, self.api_token, public_key)
        state_data = tf.run_terraform_apply(config)
        return state_data

    def start_server(self, server_id, *args, **kwargs) -> bool:
        return self.isp.server.start(server_id)

    def reboot_server(self, server_id, *args, **kwargs) -> bool:
        return self.isp.server.reboot(server_id)

    def shutdown_server(self, server_id, *args, **kwargs) -> bool:
        return self.isp.server.halt(server_id)

    def reinstall_server(self, server_id, *args, **kwargs) -> bool:
        reinstall_params = {"hostname": kwargs["hostname"]} if kwargs else None
        return self.isp.server.reinstall(server_id, reinstall_params)

    def destroy_server(self, server_id, *args, **kwargs) -> bool:
        destroy_result = True
        try:
            self.isp.server.destroy(subid=server_id)
        except VultrError as vultr_error:
            logging.info(vultr_error)
            destroy_result = False
        return destroy_result

    @property
    def api_url(self):
        return self.isp.api_endpoint


class DigitalOceanIsp(BaseVpsIsp):
    def get_isp_obj(self, **kwargs: dict):
        return digitalocean.Manager(token=self.api_token)

    def is_valid_account(self):
        valid = True
        try:
            self.isp.get_account()
        except Exception:
            valid = False
        finally:
            return valid

    def create_ssh_key(self, name: str, public_key_content: str) -> str:
        ssh_key_data = {"name": name, "public_key": public_key_content}
        ssh_key = digitalocean.SSHKey(token=self.api_token, **ssh_key_data)
        ssh_key.create()
        return ssh_key.id

    def get_available_os_list(self):
        os_obj_list = self.isp.get_images(type="distribution")
        os_dict_list = [
            vps_schema.VpsSpecOsSchema(
                os_code=os.id,
                name=f"{os.distribution} {os.name}",
                region_codes=os.regions,
            ).dict()
            for os in os_obj_list
        ]
        return os_dict_list

    def get_available_plans_list(self):
        plan_obj_list = self.isp.get_all_sizes()
        plan_dict_list = [
            vps_schema.VpsSpecPlanSchema(
                name=(
                    f"{plan_obj.memory} MB RAM-{plan_obj.vcpus} CPU"
                    f"-{plan_obj.disk} GB-{plan_obj.transfer} TB-{plan_obj.price_monthly}$"
                ),
                plan_code=plan_obj.slug,
                price_monthly=plan_obj.price_monthly,
                bandwidth=plan_obj.transfer,
                region_codes=plan_obj.regions,
                vcpu=plan_obj.vcpus,
                ram=plan_obj.memory,
                disk=plan_obj.disk,
            ).dict()
            for plan_obj in plan_obj_list
        ]
        return plan_dict_list

    def get_available_regions_list(self):
        region_obj_list = self.isp.get_all_regions()
        region_dict_list = [
            vps_schema.VpsSpecRegionSchema(
                name=region.name,
                region_code=region.slug,
                plan_codes=region.sizes,
                features=region.features,
            ).dict()
            for region in region_obj_list
        ]
        return region_dict_list

    def get_ssh_key_list(self, vps_isp_id: int):
        ssh_key_list = self.isp.get_all_sshkeys()
        ssh_key_obj_list = [
            dict(
                ssh_key_id=ssh_key.id,
                public_key=ssh_key.public_key,
                name=ssh_key.name,
                fingerprint=ssh_key.fingerprint,
                isp_id=vps_isp_id,
            )
            for ssh_key in ssh_key_list
        ]
        return ssh_key_obj_list

    def destroy_ssh_key(self, ssh_key_id):
        return self.isp.get_ssh_key(ssh_key_id).destroy()

    def create_server(
        self, vps_profile: dict, public_key: str = None, *args, **kwargs
    ) -> dict:
        tf = Terraform()
        if public_key:
            try:
                public_key_fingerprint = gen_ssh_key_fingerprint(public_key)
                public_key_exists = digitalocean.SSHKey.get_object(
                    api_token=self.api_token, ssh_key_id=public_key_fingerprint
                )
            except digitalocean.baseapi.NotFoundError:
                pass
            else:
                if vps_profile["ssh_keys"]:
                    vps_profile["ssh_keys"].append(public_key_exists.id)
                else:
                    vps_profile["ssh_keys"] = [public_key_exists.id]
                public_key = None

        config = tf.gen_digital_ocean_config(vps_profile, self.api_token, public_key)
        state_data = tf.run_terraform_apply(config)
        return state_data

    def start_server(self, server_id, *args, **kwargs) -> bool:
        droplet = digitalocean.Droplet(token=self.api_token, id=server_id)
        return droplet.power_on()

    def reboot_server(self, server_id, *args, **kwargs) -> bool:
        droplet = digitalocean.Droplet(token=self.api_token, id=server_id)
        return droplet.reboot()

    def shutdown_server(self, server_id, *args, **kwargs) -> bool:
        droplet = digitalocean.Droplet(token=self.api_token, id=server_id)
        return droplet.power_off()

    def reinstall_server(self, server_id, *args, **kwargs) -> bool:
        droplet = digitalocean.Droplet(token=self.api_token, id=server_id)
        droplet.load()
        return droplet.rebuild()

    def destroy_server(self, server_id, *args, **kwargs) -> bool:
        droplet = digitalocean.Droplet(token=self.api_token, id=server_id)
        destroy_result = True
        try:
            droplet.destroy()
        except digitalocean.baseapi.NotFoundError as droplet_error:
            logging.info(droplet_error)
            destroy_result = False
        return destroy_result

    @property
    def api_url(self):
        return self.isp.end_point


class TencentCloudIsp(BaseVpsIsp):
    def get_isp_obj(self, api_id, is_test, *args, **kwargs):
        return tc_credential.Credential(api_id, self.api_token)

    def is_valid_account(self) -> bool:
        try:
            self.get_available_regions_list()
        except TencentCloudSDKException:
            return False
        else:
            return True

    def create_ssh_key(self, name: str, public_key_content: str, region_code: str = None) -> str:
        key_id = None
        if region_code:
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.ImportKeyPairRequest()
            req_config = json.dumps(dict(ProjectId=0, KeyName=name, PublicKey=public_key_content))
            req.from_json_string(req_config)
            resp = client.ImportKeyPair(req)
            key_id = resp.KeyId
        return key_id

    def get_available_os_list(self):
        os_map = {}
        os_dict_list = []

        region_list = self.get_available_regions_list()
        for region in region_list:
            region_code = region["region_code"]
            os_list = self.get_available_region_os_list(region_code)
            for os in os_list:
                os_code = os.ImageId
                os_name = os.OsName
                if os_code in os_map:
                    os_map[os_code]["region_codes"].add(region_code)
                else:
                    os_data = dict(
                        os_code=os_code,
                        os_name=os_name,
                        region_codes=set([region_code]),
                    )
                    os_map[os_code] = os_data

        for os_data in os_map.values():
            os_dict_list.append(
                vps_schema.VpsSpecOsSchema(
                    os_code=os_data["os_code"],
                    name=os_data["os_name"],
                    region_codes=list(os_data["region_codes"]),
                ).dict()
            )
        return os_dict_list

    def get_available_region_os_list(self, region_code: str) -> List:
        client = cvm_client.CvmClient(self.isp, region_code)
        req = tc_models.DescribeImagesRequest()
        req.from_json_string('{"Limit":100}')
        resp = client.DescribeImages(req)
        return resp.ImageSet

    def get_available_plans_list(self):
        plan_map = {}
        plan_dict_list = []

        regions_list = self.get_available_regions_list()
        for region in regions_list:
            region_code = region["region_code"]
            plans_list = self.get_available_region_plans_list(region_code)
            for plan in plans_list:
                plan_code = plan.InstanceType
                if plan_code in plan_map:
                    plan_map[plan_code]["region_codes"].add(region_code)
                else:
                    if plan.Price.UnitPrice and plan.Price.ChargeUnit:
                        price_monthly = f"{plan.Price.UnitPrice or plan.Price.OriginalPrice} * {plan.Price.ChargeUnit}"
                    else:
                        price_monthly = plan.Price.OriginalPrice
                    plan_detail_name = (
                        f"{plan.TypeName}({plan.Memory} GB RAM-{plan.Cpu} 核CPU-"
                        f"{plan.CpuType}-{price_monthly} CN¥)"
                    )
                    plan_data = dict(
                        plan_code=plan_code,
                        name=plan_detail_name,
                        vcpu=plan.Cpu,
                        ram=plan.Memory,
                        disk=0,
                        bandwidth=plan.InstanceBandwidth,
                        price_monthly=price_monthly,
                        region_codes=set([region_code]),
                    )
                    plan_map[plan_code] = plan_data

        for plan_data in plan_map.values():
            plan_dict_list.append(
                vps_schema.VpsSpecPlanSchema(
                    plan_code=plan_data["plan_code"],
                    name=plan_data["name"],
                    vcpu=plan_data["vcpu"],
                    ram=plan_data["ram"],
                    disk=plan_data["disk"],
                    bandwidth=plan_data["bandwidth"],
                    price_monthly=plan_data["price_monthly"],
                    region_codes=list(plan_data["region_codes"]),
                ).dict()
            )
        return plan_dict_list

    def get_available_region_plans_list(self, region_code: str) -> List:
        client = cvm_client.CvmClient(self.isp, region_code)
        req = tc_models.DescribeZoneInstanceConfigInfosRequest()
        params = '{"Filters":[{"Name":"instance-charge-type","Values":["POSTPAID_BY_HOUR"]}]}'
        req.from_json_string(params)
        try:
            resp = client.DescribeZoneInstanceConfigInfos(req)
            plan_list = resp.InstanceTypeQuotaSet
        except TencentCloudSDKException as e:
            logging.warning(f"get tencent region plan {region_code} with err: {e}")
            plan_list = []
        return plan_list

    def get_available_regions_list(self):
        client = cvm_client.CvmClient(self.isp, None)
        req = tc_models.DescribeRegionsRequest()
        resp = client.DescribeRegions(req)
        region_dict_list = [
            vps_schema.VpsSpecRegionSchema(
                name=region.RegionName,
                region_code=region.Region,
                plan_codes=[],
                features=None,
            ).dict()
            for region in resp.RegionSet
        ]
        return region_dict_list

    def get_region_zones_list(self, region_code: str) -> List[str]:
        client = cvm_client.CvmClient(self.isp, region_code)
        req = tc_models.DescribeZonesRequest()
        resp = client.DescribeZones(req)
        zones_list = [zone.ZoneId for zone in resp.ZoneSet]
        return zones_list

    def get_ssh_key_list(self, vps_isp_id: int, region_code: str = None) -> List[dict]:
        ssh_key_list = []
        if not region_code:
            regions_list = self.get_available_regions_list()
            region_code = regions_list[0]["region_code"] if regions_list else None

        if region_code:
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.DescribeKeyPairsRequest()
            req_config = json.dumps(dict(Limit=100))
            req.from_json_string(req_config)
            resp = client.DescribeKeyPairs(req)
            ssh_key_list = [
                dict(
                    ssh_key_id=key_pair.KeyId,
                    date_created=key_pair.CreatedTime,
                    name=key_pair.KeyName,
                    public_key=key_pair.PublicKey,
                    isp_id=vps_isp_id,
                )
                for key_pair in resp.KeyPairSet
            ]
        return ssh_key_list

    def destroy_ssh_key(self, ssh_key_id):
        # CvmClient requires a region argument; pass None as done elsewhere
        # in this class (the original code omitted it).
        client = cvm_client.CvmClient(self.isp, None)
        req = tc_models.DeleteKeyPairsRequest()
        req_config = json.dumps(dict(KeyIds=[ssh_key_id]))
        req.from_json_string(req_config)
        client.DeleteKeyPairs(req)
        return True

    def create_server(
        self, vps_profile: dict, public_key: str = None, *args, **kwargs
    ) -> dict:
        api_id = self.isp.secretId
        tf = Terraform()

        # attach ssh key
        if public_key:
            ssh_key_name = self.get_or_create_ssh_key_by_public_key(
                public_key, vps_profile["region_code"]
            )
        else:
            ssh_key_name = None

        config = tf.gen_tencent_cloud_config(vps_profile, self.api_token, ssh_key_name, api_id)
        state_data = tf.run_terraform_apply(config)
        return state_data

    def get_or_create_ssh_key_by_public_key(self, public_key: str, region_id: str = None) -> str:
        ssh_key_name = None
        # isp_id is irrelevant for this lookup; pass region_id by keyword so it
        # is not mistaken for vps_isp_id (the original positional call had this bug).
        existed_ssh_key = self.get_ssh_key_list(vps_isp_id=0, region_code=region_id)
        for ssh_key in existed_ssh_key:
            clean_ssh_public_key = ssh_key["public_key"].replace(ssh_key["ssh_key_id"], '').strip()
            public_key_in_tx_format = ' '.join(public_key.split(" ")[:2])
            logging.debug("%s %s %s", ssh_key, clean_ssh_public_key, public_key_in_tx_format)
            if clean_ssh_public_key == public_key_in_tx_format:
                ssh_key_name = ssh_key["ssh_key_id"]
                break

        if not ssh_key_name:
            unix_timestamp = str(int(datetime.utcnow().timestamp()))
            ssh_key_name = self.create_ssh_key(unix_timestamp, public_key, region_id)
        return ssh_key_name

    def start_server(self, server_id, *args, **kwargs) -> bool:
        started = False  # initialized so an empty region list cannot raise NameError
        regions_list = self.get_available_regions_list()
        for region in regions_list:
            region_code = region["region_code"]
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.StartInstancesRequest()
            req_config = json.dumps(dict(InstanceIds=[server_id]))
            req.from_json_string(req_config)
            try:
                client.StartInstances(req)
            except Exception as e:
                logging.warning(e)
                started = False
            else:
                started = True
                break
        return started

    def reboot_server(self, server_id, *args, **kwargs) -> bool:
        rebooted = False
        regions_list = self.get_available_regions_list()
        for region in regions_list:
            region_code = region["region_code"]
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.RebootInstancesRequest()
            req_config = json.dumps(
                dict(InstanceIds=[server_id], ForceReboot=True)
            )
            req.from_json_string(req_config)
            try:
                client.RebootInstances(req)
            except Exception as e:
                logging.warning(e)
                rebooted = False
            else:
                rebooted = True
                break
        return rebooted

    def shutdown_server(self, server_id, *args, **kwargs) -> bool:
        result = False
        regions_list = self.get_available_regions_list()
        for region in regions_list:
            region_code = region["region_code"]
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.StopInstancesRequest()
            req_config = json.dumps(
                dict(InstanceIds=[server_id], ForceStop=True)
            )
            req.from_json_string(req_config)
            try:
                client.StopInstances(req)
            except Exception as e:
                logging.warning(e)
                result = False
            else:
                result = True
                break
        return result

    def reinstall_server(self, server_id, *args, **kwargs) -> bool:
        result = False
        regions_list = self.get_available_regions_list()
        for region in regions_list:
            region_code = region["region_code"]
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.ResetInstanceRequest()
            req_config = json.dumps(
                dict(
                    InstanceIds=[server_id],
                    LoginSettings=dict(KeepImageLogin=True),
                    EnhancedService=dict(SecurityService=False, MonitorService=False),
                )
            )
            req.from_json_string(req_config)
            try:
                client.ResetInstance(req)
            except Exception as e:
                logging.warning(e)
                result = False
            else:
                result = True
                break
        return result

    def destroy_server(self, server_id, *args, **kwargs) -> bool:
        result = False
        regions_list = self.get_available_regions_list()
        for region in regions_list:
            region_code = region["region_code"]
            client = cvm_client.CvmClient(self.isp, region_code)
            req = tc_models.TerminateInstancesRequest()
            req_config = json.dumps(dict(InstanceIds=[server_id]))
            req.from_json_string(req_config)
            try:
                client.TerminateInstances(req)
            except Exception as e:
                logging.warning(e)
                result = False
            else:
                result = True
                break
        return result

    @property
    def api_url(self):
        client = cvm_client.CvmClient(self.isp, None)
        return client._endpoint


class AliyunIsp(BaseVpsIsp):
    _ECS_ENDPOINT = "ecs.aliyuncs.com"

    def get_isp_obj(self, api_id, is_test, *args, **kwargs):
        return AcsClient(api_id, self.api_token)

    def handle_request(self, request: RpcRequest, region_id: str = None) -> dict:
        if region_id:
            self.isp.set_region_id(region_id)

        try:
            response = self.isp.do_action_with_exception(request)
        except (ClientException, ServerException) as e:
            logging.warning(request)
            logging.warning(e)
            json_res = {}
        else:
            json_res = json.loads(response)
        return json_res

    def is_valid_account(self) -> bool:
        request = StartInstanceRequest.StartInstanceRequest()
        request.set_DryRun(True)
        valid = self.handle_request(request)
        if valid:
            return True
        else:
            return False

    def create_ssh_key(self, name: str, public_key_content: str) -> str:
        request = ImportKeyPairRequest.ImportKeyPairRequest()
        request.set_KeyPairName(name)
        request.set_PublicKeyBody(public_key_content)
        response = self.handle_request(request)
        return response.get("KeyPairName")

    def get_available_os_list(self):
        os_map = {}
        os_dict_list = []

        region_list = self.get_available_regions_list()
        for region in region_list:
            region_code = region["region_code"]
            os_list = self.get_available_region_os_list(region_code)
            for os in os_list:
                os_code = os["ImageId"]
                os_name = os["OSName"]
                if os_code in os_map:
                    os_map[os_code]["region_codes"].add(region_code)
                else:
                    os_data = dict(
                        os_code=os_code,
                        os_name=os_name,
                        region_codes=set([region_code]),
                        plan_codes=self.get_support_instance_plan_type(os_code),
                    )
                    os_map[os_code] = os_data

        for os_data in os_map.values():
            os_dict_list.append(
                vps_schema.VpsSpecOsSchema(
                    os_code=os_data["os_code"],
                    name=os_data["os_name"],
                    region_codes=list(os_data["region_codes"]),
                    plan_codes=os_data["plan_codes"],
                ).dict()
            )
        return os_dict_list

    def get_support_instance_plan_type(self, image_id: str) -> List:
        request = DescribeImageSupportInstanceTypesRequest.DescribeImageSupportInstanceTypesRequest()
        request.set_ImageId(image_id)
        response = self.handle_request(request)
        plan_list = [
            image_type["InstanceTypeId"]
            for image_type in response['InstanceTypes']['InstanceType']
        ]
        return plan_list

    def get_available_region_os_list(self, region_code: str) -> List:
        request = DescribeImagesRequest.DescribeImagesRequest()
        request.set_PageSize(100)
        request.set_ImageOwnerAlias("system")
        # pass the region through so the query is actually region-scoped
        response = self.handle_request(request, region_id=region_code)
        os_list = response["Images"].get("Image", [])
        return os_list

    def get_available_plans_list(self):
        # TODO: query price when aliyun enables price query support
        request = DescribeInstanceTypesRequest.DescribeInstanceTypesRequest()
        response = self.handle_request(request)
        plan_list = [
            vps_schema.VpsSpecPlanSchema(
                plan_code=plan["InstanceTypeId"],
                name=f"{plan['InstanceTypeFamily']}-{plan['MemorySize']}GB RAM-{plan['CpuCoreCount']} 核CPU",
                vcpu=plan["CpuCoreCount"],
                ram=plan["MemorySize"],
                disk=0,
                bandwidth=0,
                price_monthly=0,
                region_codes=[],
            ).dict()
            for plan in response["InstanceTypes"].get("InstanceType", [])
        ]
        return plan_list

    def get_available_region_plan_list(self, region_code: str) -> List:
        request = DescribeAvailableResourceRequest.DescribeAvailableResourceRequest()
        request.set_DestinationResource("InstanceType")
        # pass the region through so the query is actually region-scoped
        response = self.handle_request(request, region_id=region_code)
        region_plan_list = []
        for plan_data in response["AvailableZones"]["AvailableZone"]:
            for region_zone_plan in plan_data["AvailableResources"][
                "AvailableResource"
            ]:
                region_zone_plan_list = [
                    region_zone_plan_data["Value"]
                    for region_zone_plan_data in region_zone_plan["SupportedResources"][
                        "SupportedResource"
                    ]
                    if region_zone_plan_data["Status"] == "Available"
                ]
                region_plan_list.extend(region_zone_plan_list)

        region_plan_list = list(set(region_plan_list))
        return region_plan_list

    def get_available_regions_list(self):
        request = DescribeRegionsRequest.DescribeRegionsRequest()
        response = self.handle_request(request)
        region_list = response["Regions"].get("Region", [])
        region_dict_list = [
            vps_schema.VpsSpecRegionSchema(
                name=region_data["LocalName"],
                region_code=region_data["RegionId"],
                plan_codes=self.get_available_region_plan_list(region_data["RegionId"]),
                features=None,
            ).dict()
            for region_data in region_list
            if self.get_available_region_plan_list(region_data["RegionId"])
        ]
        return region_dict_list

    def get_ssh_key_list(self, vps_isp_id: int) -> List[dict]:
        request = DescribeKeyPairsRequest.DescribeKeyPairsRequest()
        request.set_PageSize(50)
        response = self.handle_request(request)
        ssh_key_list = response["KeyPairs"].get("KeyPair", [])
        ssh_key_list = [
            dict(
                fingerprint=key_pair["KeyPairFingerPrint"],
                date_created=key_pair["CreationTime"],
                name=key_pair["KeyPairName"],
                isp_id=vps_isp_id,
            )
            for key_pair in ssh_key_list
        ]
        return ssh_key_list

    def destroy_ssh_key(self, ssh_key_id) -> bool:
        request = DeleteKeyPairsRequest.DeleteKeyPairsRequest()
        request.set_KeyPairNames([ssh_key_id])
        response = self.handle_request(request)
        return bool(response)

    def create_server(
        self, vps_profile: dict, public_key: str = None, *args, **kwargs
    ) -> dict:
        # one ecs only binds to one ssh key pair
        tf = Terraform()
        ak = self.isp.get_access_key()

        # attach ssh key
        if public_key:
            ssh_key_name = self.get_or_create_ssh_key_by_public_key(
                public_key, vps_profile["region_code"]
            )
        else:
            # keep ssh_key_name bound when no public key is supplied
            ssh_key_name = None

        # get security groups
        # security_groups = self.get_or_create_default_security_group(vps_profile["region_code"])

        config = tf.gen_ali_cloud_config(vps_profile, self.api_token, ssh_key_name, ak)
        state_data = tf.run_terraform_apply(config)
        return state_data

    def get_or_create_default_security_group(self, region_code: str) -> str:
        query_request = DescribeSecurityGroupsRequest.DescribeSecurityGroupsRequest()
        query_request.set_SecurityGroupName(self._DEFAULT_SECURITY_GROUP_NAME)
        query_response = self.handle_request(query_request, region_id=region_code)
        query_sec_groups = query_response["SecurityGroups"].get("SecurityGroup", [])

        sec_group_id = None
        if query_sec_groups:
            sec_group_id = query_sec_groups[0]["SecurityGroupId"]
        else:
            create_request = CreateSecurityGroupRequest.CreateSecurityGroupRequest()
            create_request.set_SecurityGroupName(self._DEFAULT_SECURITY_GROUP_NAME)
            create_response = self.handle_request(create_request, region_id=region_code)
            sec_group_id = create_response["SecurityGroupId"]

            ip_protocol = "all"
            port_range = "-1/-1"
            source_cidr_ip = "0.0.0.0/0"
            dest_cidr_ip = "0.0.0.0/0"
            policy = "accept"
            self.set_security_group_inner_rule(
                region_code=region_code,
                sec_group_id=sec_group_id,
                source_cidr_ip=source_cidr_ip,
                ip_protocol=ip_protocol,
                port_range=port_range,
                nic_type="internet",
                policy=policy,
            )
            self.set_security_group_outer_rule(
                region_code=region_code,
                sec_group_id=sec_group_id,
                dest_cidr_ip=dest_cidr_ip,
                ip_protocol=ip_protocol,
                port_range=port_range,
                nic_type="internet",
                policy=policy,
            )
        return sec_group_id

    def set_security_group_inner_rule(self, region_code: str, sec_group_id: str,
                                      source_cidr_ip: str, ip_protocol: str,
                                      port_range: str, nic_type: str, policy: str):
        request = AuthorizeSecurityGroupRequest.AuthorizeSecurityGroupRequest()
        request.set_SecurityGroupId(sec_group_id)
        request.set_SourceCidrIp(source_cidr_ip)
        request.set_IpProtocol(ip_protocol)
        request.set_PortRange(port_range)
        request.set_NicType(nic_type)
        request.set_Policy(policy)
        self.handle_request(request, region_id=region_code)

    def set_security_group_outer_rule(self, region_code: str, sec_group_id: str,
                                      dest_cidr_ip: str, ip_protocol: str,
                                      port_range: str, nic_type: str, policy: str):
        request = AuthorizeSecurityGroupEgressRequest.AuthorizeSecurityGroupEgressRequest()
        request.set_SecurityGroupId(sec_group_id)
        request.set_DestCidrIp(dest_cidr_ip)
        request.set_IpProtocol(ip_protocol)
        request.set_PortRange(port_range)
        request.set_NicType(nic_type)
        request.set_Policy(policy)
        self.handle_request(request, region_id=region_code)

    def get_or_create_ssh_key_by_public_key(self, public_key: str, region_id: str = None) -> str:
        public_key_md5 = gen_ssh_key_fingerprint(public_key).replace(":", "")
        request = DescribeKeyPairsRequest.DescribeKeyPairsRequest()
        request.set_KeyPairFingerPrint(public_key_md5)
        response = self.handle_request(request, region_id=region_id)
        ssh_keys = response["KeyPairs"].get("KeyPair", [])

        ssh_key_name = None
        if ssh_keys:
            ssh_key_name = ssh_keys[0]["KeyPairName"]

        if not ssh_key_name:
            unix_timestamp = f"luwu_{int(datetime.utcnow().timestamp())}"
            ssh_key_name = self.create_ssh_key(unix_timestamp, public_key)
        return ssh_key_name

    def start_server(self, server_id, *args, **kwargs) -> bool:
        request = StartInstanceRequest.StartInstanceRequest()
        request.set_InstanceId(server_id)
        response = self.handle_request(request)
        return bool(response)

    def reboot_server(self, server_id, *args, **kwargs) -> bool:
        request = RebootInstanceRequest.RebootInstanceRequest()
        request.set_InstanceId(server_id)
        request.set_ForceStop(True)
        response = self.handle_request(request)
        return bool(response)

    def shutdown_server(self, server_id, *args, **kwargs) -> bool:
        request = StopInstanceRequest.StopInstanceRequest()
        request.set_InstanceId(server_id)
        request.set_ConfirmStop(True)
        response = self.handle_request(request)
        return bool(response)

    def reinstall_server(self, server_id, *args, **kwargs) -> bool:
        # unsupported
        return False

    def destroy_server(self, server_id, *args, **kwargs) -> bool:
        request = DeleteInstanceRequest.DeleteInstanceRequest()
        request.set_InstanceId(server_id)
        request.set_Force(True)
        request.set_TerminateSubscription(True)
        response = self.handle_request(request)
        return bool(response)

    @property
    def api_url(self):
        return self._ECS_ENDPOINT


class AlibabaCloudIsp(AliyunIsp):
    def get_isp_obj(self, api_id, is_test, *args, **kwargs):
        return AcsClient(api_id, self.api_token)
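A hedged usage sketch for the domain-side class above; the token, domain, and IP address are placeholders, and is_test=True points the client at the NameSilo sandbox rather than the live API:

# All values below are placeholders.
isp = NameSiloIsp(api_token='YOUR_NAMESILO_TOKEN', is_test=True)
domain = 'example-infra.com'
if isp.check_domain(domain):
    isp.register_domain(domain, years=1)
    isp.set_dns_a_record(domain, '203.0.113.10')
print(isp.list_dns_records(domain))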
max_stars_repo_path: nuplan/planning/script/builders/metric_runner_builder.py
max_stars_repo_name: motional/nuplan-devkit
max_stars_count: 128
id: 132459
content:
import logging
from typing import List

from omegaconf import DictConfig

from nuplan.planning.script.builders.metric_builder import build_metrics_engines
from nuplan.planning.simulation.callback.metric_callback import MetricCallback
from nuplan.planning.simulation.runner.metric_runner import MetricRunner
from nuplan.planning.simulation.simulation_log import SimulationLog

logger = logging.getLogger(__name__)


def build_metric_runners(cfg: DictConfig, simulation_logs: List[SimulationLog]) -> List[MetricRunner]:
    """
    Build metric runners.
    :param cfg: DictConfig. Configuration that is used to run the experiment.
    :param simulation_logs: A list of simulation logs.
    :return: A list of metric runners.
    """
    logger.info('Building metric runners...')

    # Create a list of metric runners
    metric_runners = list()

    # Build a list of scenarios
    logger.info('Extracting scenarios...')
    scenarios = [simulation_log.scenario for simulation_log in simulation_logs]
    logger.info('Extracting scenarios...DONE!')

    logger.info('Building metric engines...')
    metric_engines_map = build_metrics_engines(cfg=cfg, scenarios=scenarios)
    logger.info('Building metric engines...DONE')

    logger.info(f'Building metric_runner from {len(scenarios)} scenarios...')
    for simulation_log in simulation_logs:
        scenario = simulation_log.scenario
        metric_engine = metric_engines_map.get(scenario.scenario_type, None)
        if not metric_engine:
            raise ValueError(f'No metric engine found for scenario type: {scenario.scenario_type}.')
        if not simulation_log:
            raise ValueError(f'{scenario.scenario_name} not found in simulation logs.')
        metric_callback = MetricCallback(metric_engine=metric_engine)
        metric_runner = MetricRunner(simulation_log=simulation_log, metric_callback=metric_callback)
        metric_runners.append(metric_runner)

    logger.info('Building metric runners...DONE!')
    return metric_runners
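A hedged driver sketch for the builder above; the config path and the log-loading helper are hypothetical, and it assumes MetricRunner exposes a run() entry point like the other nuplan runners:

from omegaconf import OmegaConf

# Hypothetical: load the experiment config and previously serialized logs.
cfg = OmegaConf.load('config/run_metric.yaml')
simulation_logs = load_simulation_logs()  # hypothetical helper returning List[SimulationLog]

for runner in build_metric_runners(cfg=cfg, simulation_logs=simulation_logs):
    runner.run()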
max_stars_repo_path: train.py
max_stars_repo_name: jayagupta678/DeepEvolve
max_stars_count: 171
id: 132508
content:
<gh_stars>100-1000
"""
Generic setup of the data sources and the model training.

Based on:
    https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
and also on:
    https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
"""

from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.utils.np_utils import to_categorical
from keras.callbacks import EarlyStopping, Callback
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

import logging

# Helper: Early stopping. If the validation loss stops dropping, there is
# nothing left to learn after each epoch, so cut training short.
early_stopper = EarlyStopping(
    monitor='val_loss',
    min_delta=0.1,
    patience=2,
    verbose=0,
    mode='auto'
)


def get_cifar10_mlp():
    """Retrieve the CIFAR-10 dataset and process the data."""
    # Set defaults.
    nb_classes = 10  # dataset dependent
    batch_size = 64
    epochs = 4
    input_shape = (3072,)  # because it's RGB

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train,
            x_test, y_train, y_test, epochs)


def get_cifar10_cnn():
    """Retrieve the CIFAR-10 dataset and process the data."""
    # Set defaults.
    nb_classes = 10  # dataset dependent
    batch_size = 128
    epochs = 4

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    # x_train shape: (50000, 32, 32, 3), so input shape is (32, 32, 3)
    input_shape = x_train.shape[1:]

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    return (nb_classes, batch_size, input_shape, x_train,
            x_test, y_train, y_test, epochs)


def get_mnist_mlp():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10  # dataset dependent
    batch_size = 64
    epochs = 4
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train,
            x_test, y_train, y_test, epochs)


def get_mnist_cnn():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10  # dataset dependent
    batch_size = 128
    epochs = 4

    # Input image dimensions
    img_rows, img_cols = 28, 28

    # Get the data: shuffled and split between train and test sets.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train,
            x_test, y_train, y_test, epochs)


def compile_model_mlp(genome, nb_classes, input_shape):
    """Compile a sequential model.

    Args:
        genome: the genome carrying the parameters of the network

    Returns:
        a compiled network.
    """
    # Get our network parameters.
    nb_layers = genome.geneparam['nb_layers']
    nb_neurons = genome.nb_neurons()
    activation = genome.geneparam['activation']
    optimizer = genome.geneparam['optimizer']

    logging.info("Architecture:%s,%s,%s,%d" % (str(nb_neurons), activation, optimizer, nb_layers))

    model = Sequential()

    # Add each layer.
    for i in range(nb_layers):
        # Need input shape for first layer.
        if i == 0:
            model.add(Dense(nb_neurons[i], activation=activation, input_shape=input_shape))
        else:
            model.add(Dense(nb_neurons[i], activation=activation))

        model.add(Dropout(0.2))  # hard-coded dropout for each layer

    # Output layer.
    model.add(Dense(nb_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model


def compile_model_cnn(genome, nb_classes, input_shape):
    """Compile a sequential model.

    Args:
        genome: the genome carrying the parameters of the network

    Returns:
        a compiled network.
    """
    # Get our network parameters.
    nb_layers = genome.geneparam['nb_layers']
    nb_neurons = genome.nb_neurons()
    activation = genome.geneparam['activation']
    optimizer = genome.geneparam['optimizer']

    logging.info("Architecture:%s,%s,%s,%d" % (str(nb_neurons), activation, optimizer, nb_layers))

    model = Sequential()

    # Add each layer.
    for i in range(0, nb_layers):
        # Need input shape for first layer.
        if i == 0:
            model.add(Conv2D(nb_neurons[i], kernel_size=(3, 3), activation=activation,
                             padding='same', input_shape=input_shape))
        else:
            model.add(Conv2D(nb_neurons[i], kernel_size=(3, 3), activation=activation))

        if i < 2:  # otherwise we hit zero
            model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Dropout(0.2))

    model.add(Flatten())
    # always use last nb_neurons value for dense layer
    model.add(Dense(nb_neurons[len(nb_neurons) - 1], activation=activation))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    # BAYESIAN CONVOLUTIONAL NEURAL NETWORKS WITH BERNOULLI APPROXIMATE
    # VARIATIONAL INFERENCE - need to read this paper

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model


class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))


def train_and_score(genome, dataset):
    """Train the model, return test accuracy.

    Args:
        genome: the genome carrying the parameters of the network
        dataset (str): Dataset to use for training/evaluating
    """
    logging.info("Getting Keras datasets")

    if dataset == 'cifar10_mlp':
        nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_cifar10_mlp()
    elif dataset == 'cifar10_cnn':
        nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_cifar10_cnn()
    elif dataset == 'mnist_mlp':
        nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_mnist_mlp()
    elif dataset == 'mnist_cnn':
        nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs = get_mnist_cnn()

    logging.info("Compiling Keras model")

    if dataset == 'cifar10_mlp':
        model = compile_model_mlp(genome, nb_classes, input_shape)
    elif dataset == 'cifar10_cnn':
        model = compile_model_cnn(genome, nb_classes, input_shape)
    elif dataset == 'mnist_mlp':
        model = compile_model_mlp(genome, nb_classes, input_shape)
    elif dataset == 'mnist_cnn':
        model = compile_model_cnn(genome, nb_classes, input_shape)

    history = LossHistory()

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,  # using early stopping, so no real limit -
                              # don't want to waste time on horrible architectures
              verbose=1,
              validation_data=(x_test, y_test),
              # callbacks=[history])
              callbacks=[early_stopper])

    score = model.evaluate(x_test, y_test, verbose=0)

    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    K.clear_session()
    # we do not care about keeping any of this in memory -
    # we just need to know the final scores and the architecture

    return score[1]  # 1 is accuracy. 0 is loss.
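To exercise train_and_score without the rest of the DeepEvolve pipeline, a minimal stand-in for the genome object is enough; DummyGenome below is hypothetical and only mimics the two members the compile functions read (geneparam and nb_neurons()):

class DummyGenome:
    """Hypothetical stand-in exposing just what compile_model_* reads."""
    def __init__(self):
        self.geneparam = {
            'nb_layers': 2,
            'activation': 'relu',
            'optimizer': 'adam',
        }

    def nb_neurons(self):
        # One width per layer; compile_model_mlp indexes this list by layer.
        return [64, 32]


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    accuracy = train_and_score(DummyGenome(), 'mnist_mlp')
    print('Smoke-test accuracy:', accuracy)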
max_stars_repo_path: Lib/test/test_compiler/testcorpus/22_func_arg.py
max_stars_repo_name: diogommartins/cinder
max_stars_count: 1,886
id: 132536
content:
<gh_stars>1000+
def foo(a, b):
    a + b
max_stars_repo_path: src/oci/operator_access_control/models/operator_control_assignment_summary.py
max_stars_repo_name: Manny27nyc/oci-python-sdk
max_stars_count: 249
id: 132547
content:
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.

from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel  # noqa: F401
from oci.decorators import init_model_state_from_kwargs


@init_model_state_from_kwargs
class OperatorControlAssignmentSummary(object):
    """
    Details of the operator control assignment.
    """

    #: A constant which can be used with the lifecycle_state property of a OperatorControlAssignmentSummary.
    #: This constant has a value of "CREATED"
    LIFECYCLE_STATE_CREATED = "CREATED"

    #: A constant which can be used with the lifecycle_state property of a OperatorControlAssignmentSummary.
    #: This constant has a value of "APPLIED"
    LIFECYCLE_STATE_APPLIED = "APPLIED"

    #: A constant which can be used with the lifecycle_state property of a OperatorControlAssignmentSummary.
    #: This constant has a value of "APPLYFAILED"
    LIFECYCLE_STATE_APPLYFAILED = "APPLYFAILED"

    #: A constant which can be used with the lifecycle_state property of a OperatorControlAssignmentSummary.
    #: This constant has a value of "DELETED"
    LIFECYCLE_STATE_DELETED = "DELETED"

    def __init__(self, **kwargs):
        """
        Initializes a new OperatorControlAssignmentSummary object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param id:
            The value to assign to the id property of this OperatorControlAssignmentSummary.
        :type id: str

        :param operator_control_id:
            The value to assign to the operator_control_id property of this OperatorControlAssignmentSummary.
        :type operator_control_id: str

        :param resource_id:
            The value to assign to the resource_id property of this OperatorControlAssignmentSummary.
        :type resource_id: str

        :param resource_type:
            The value to assign to the resource_type property of this OperatorControlAssignmentSummary.
        :type resource_type: str

        :param compartment_id:
            The value to assign to the compartment_id property of this OperatorControlAssignmentSummary.
        :type compartment_id: str

        :param time_assignment_from:
            The value to assign to the time_assignment_from property of this OperatorControlAssignmentSummary.
        :type time_assignment_from: datetime

        :param time_assignment_to:
            The value to assign to the time_assignment_to property of this OperatorControlAssignmentSummary.
        :type time_assignment_to: datetime

        :param is_enforced_always:
            The value to assign to the is_enforced_always property of this OperatorControlAssignmentSummary.
        :type is_enforced_always: bool

        :param time_of_assignment:
            The value to assign to the time_of_assignment property of this OperatorControlAssignmentSummary.
        :type time_of_assignment: datetime

        :param lifecycle_state:
            The value to assign to the lifecycle_state property of this OperatorControlAssignmentSummary.
            Allowed values for this property are: "CREATED", "APPLIED", "APPLYFAILED", "DELETED", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type lifecycle_state: str

        :param freeform_tags:
            The value to assign to the freeform_tags property of this OperatorControlAssignmentSummary.
        :type freeform_tags: dict(str, str)

        :param defined_tags:
            The value to assign to the defined_tags property of this OperatorControlAssignmentSummary.
        :type defined_tags: dict(str, dict(str, object))

        """
        self.swagger_types = {
            'id': 'str',
            'operator_control_id': 'str',
            'resource_id': 'str',
            'resource_type': 'str',
            'compartment_id': 'str',
            'time_assignment_from': 'datetime',
            'time_assignment_to': 'datetime',
            'is_enforced_always': 'bool',
            'time_of_assignment': 'datetime',
            'lifecycle_state': 'str',
            'freeform_tags': 'dict(str, str)',
            'defined_tags': 'dict(str, dict(str, object))'
        }

        self.attribute_map = {
            'id': 'id',
            'operator_control_id': 'operatorControlId',
            'resource_id': 'resourceId',
            'resource_type': 'resourceType',
            'compartment_id': 'compartmentId',
            'time_assignment_from': 'timeAssignmentFrom',
            'time_assignment_to': 'timeAssignmentTo',
            'is_enforced_always': 'isEnforcedAlways',
            'time_of_assignment': 'timeOfAssignment',
            'lifecycle_state': 'lifecycleState',
            'freeform_tags': 'freeformTags',
            'defined_tags': 'definedTags'
        }

        self._id = None
        self._operator_control_id = None
        self._resource_id = None
        self._resource_type = None
        self._compartment_id = None
        self._time_assignment_from = None
        self._time_assignment_to = None
        self._is_enforced_always = None
        self._time_of_assignment = None
        self._lifecycle_state = None
        self._freeform_tags = None
        self._defined_tags = None

    @property
    def id(self):
        """
        **[Required]** Gets the id of this OperatorControlAssignmentSummary.
        The OCID of the operator control assignment.

        :return: The id of this OperatorControlAssignmentSummary.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this OperatorControlAssignmentSummary.
        The OCID of the operator control assignment.

        :param id: The id of this OperatorControlAssignmentSummary.
        :type: str
        """
        self._id = id

    @property
    def operator_control_id(self):
        """
        **[Required]** Gets the operator_control_id of this OperatorControlAssignmentSummary.
        The OCID of the operator control.

        :return: The operator_control_id of this OperatorControlAssignmentSummary.
        :rtype: str
        """
        return self._operator_control_id

    @operator_control_id.setter
    def operator_control_id(self, operator_control_id):
        """
        Sets the operator_control_id of this OperatorControlAssignmentSummary.
        The OCID of the operator control.

        :param operator_control_id: The operator_control_id of this OperatorControlAssignmentSummary.
        :type: str
        """
        self._operator_control_id = operator_control_id

    @property
    def resource_id(self):
        """
        **[Required]** Gets the resource_id of this OperatorControlAssignmentSummary.
        The OCID of the target resource being governed by the operator control.

        :return: The resource_id of this OperatorControlAssignmentSummary.
        :rtype: str
        """
        return self._resource_id

    @resource_id.setter
    def resource_id(self, resource_id):
        """
        Sets the resource_id of this OperatorControlAssignmentSummary.
        The OCID of the target resource being governed by the operator control.

        :param resource_id: The resource_id of this OperatorControlAssignmentSummary.
        :type: str
        """
        self._resource_id = resource_id

    @property
    def resource_type(self):
        """
        Gets the resource_type of this OperatorControlAssignmentSummary.
        Type of the target resource being governed by the operator control.

        :return: The resource_type of this OperatorControlAssignmentSummary.
        :rtype: str
        """
        return self._resource_type

    @resource_type.setter
    def resource_type(self, resource_type):
        """
        Sets the resource_type of this OperatorControlAssignmentSummary.
        Type of the target resource being governed by the operator control.

        :param resource_type: The resource_type of this OperatorControlAssignmentSummary.
        :type: str
        """
        self._resource_type = resource_type

    @property
    def compartment_id(self):
        """
        **[Required]** Gets the compartment_id of this OperatorControlAssignmentSummary.
        The OCID of the compartment that contains the operator control assignment.

        :return: The compartment_id of this OperatorControlAssignmentSummary.
        :rtype: str
        """
        return self._compartment_id

    @compartment_id.setter
    def compartment_id(self, compartment_id):
        """
        Sets the compartment_id of this OperatorControlAssignmentSummary.
        The OCID of the compartment that contains the operator control assignment.

        :param compartment_id: The compartment_id of this OperatorControlAssignmentSummary.
        :type: str
        """
        self._compartment_id = compartment_id

    @property
    def time_assignment_from(self):
        """
        Gets the time_assignment_from of this OperatorControlAssignmentSummary.
        The time at which the target resource will be brought under the governance of the operator control in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z'

        __ https://tools.ietf.org/html/rfc3339

        :return: The time_assignment_from of this OperatorControlAssignmentSummary.
        :rtype: datetime
        """
        return self._time_assignment_from

    @time_assignment_from.setter
    def time_assignment_from(self, time_assignment_from):
        """
        Sets the time_assignment_from of this OperatorControlAssignmentSummary.
        The time at which the target resource will be brought under the governance of the operator control in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z'

        __ https://tools.ietf.org/html/rfc3339

        :param time_assignment_from: The time_assignment_from of this OperatorControlAssignmentSummary.
        :type: datetime
        """
        self._time_assignment_from = time_assignment_from

    @property
    def time_assignment_to(self):
        """
        Gets the time_assignment_to of this OperatorControlAssignmentSummary.
        The time at which the target resource will leave the governance of the operator control in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z'

        __ https://tools.ietf.org/html/rfc3339

        :return: The time_assignment_to of this OperatorControlAssignmentSummary.
        :rtype: datetime
        """
        return self._time_assignment_to

    @time_assignment_to.setter
    def time_assignment_to(self, time_assignment_to):
        """
        Sets the time_assignment_to of this OperatorControlAssignmentSummary.
        The time at which the target resource will leave the governance of the operator control in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z'

        __ https://tools.ietf.org/html/rfc3339

        :param time_assignment_to: The time_assignment_to of this OperatorControlAssignmentSummary.
        :type: datetime
        """
        self._time_assignment_to = time_assignment_to

    @property
    def is_enforced_always(self):
        """
        Gets the is_enforced_always of this OperatorControlAssignmentSummary.
        If true, then the target resource is always governed by the operator control. Otherwise governance is time-based as specified by timeAssignmentTo and timeAssignmentFrom.

        :return: The is_enforced_always of this OperatorControlAssignmentSummary.
        :rtype: bool
        """
        return self._is_enforced_always

    @is_enforced_always.setter
    def is_enforced_always(self, is_enforced_always):
        """
        Sets the is_enforced_always of this OperatorControlAssignmentSummary.
        If true, then the target resource is always governed by the operator control. Otherwise governance is time-based as specified by timeAssignmentTo and timeAssignmentFrom.

        :param is_enforced_always: The is_enforced_always of this OperatorControlAssignmentSummary.
        :type: bool
        """
        self._is_enforced_always = is_enforced_always

    @property
    def time_of_assignment(self):
        """
        Gets the time_of_assignment of this OperatorControlAssignmentSummary.
        Time when the operator control assignment is created in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z'

        __ https://tools.ietf.org/html/rfc3339

        :return: The time_of_assignment of this OperatorControlAssignmentSummary.
        :rtype: datetime
        """
        return self._time_of_assignment

    @time_of_assignment.setter
    def time_of_assignment(self, time_of_assignment):
        """
        Sets the time_of_assignment of this OperatorControlAssignmentSummary.
        Time when the operator control assignment is created in `RFC 3339`__ timestamp format. Example: '2020-05-22T21:10:29.600Z'

        __ https://tools.ietf.org/html/rfc3339

        :param time_of_assignment: The time_of_assignment of this OperatorControlAssignmentSummary.
        :type: datetime
        """
        self._time_of_assignment = time_of_assignment

    @property
    def lifecycle_state(self):
        """
        Gets the lifecycle_state of this OperatorControlAssignmentSummary.
        The current lifecycle state of the OperatorControl.

        Allowed values for this property are: "CREATED", "APPLIED", "APPLYFAILED", "DELETED", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The lifecycle_state of this OperatorControlAssignmentSummary.
        :rtype: str
        """
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """
        Sets the lifecycle_state of this OperatorControlAssignmentSummary.
        The current lifecycle state of the OperatorControl.

        :param lifecycle_state: The lifecycle_state of this OperatorControlAssignmentSummary.
        :type: str
        """
        allowed_values = ["CREATED", "APPLIED", "APPLYFAILED", "DELETED"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            lifecycle_state = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = lifecycle_state

    @property
    def freeform_tags(self):
        """
        Gets the freeform_tags of this OperatorControlAssignmentSummary.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.

        :return: The freeform_tags of this OperatorControlAssignmentSummary.
        :rtype: dict(str, str)
        """
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        """
        Sets the freeform_tags of this OperatorControlAssignmentSummary.
        Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.

        :param freeform_tags: The freeform_tags of this OperatorControlAssignmentSummary.
        :type: dict(str, str)
        """
        self._freeform_tags = freeform_tags

    @property
    def defined_tags(self):
        """
        Gets the defined_tags of this OperatorControlAssignmentSummary.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.

        :return: The defined_tags of this OperatorControlAssignmentSummary.
        :rtype: dict(str, dict(str, object))
        """
        return self._defined_tags

    @defined_tags.setter
    def defined_tags(self, defined_tags):
        """
        Sets the defined_tags of this OperatorControlAssignmentSummary.
        Defined tags for this resource. Each key is predefined and scoped to a namespace.

        :param defined_tags: The defined_tags of this OperatorControlAssignmentSummary.
        :type: dict(str, dict(str, object))
        """
        self._defined_tags = defined_tags

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
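A brief, hedged construction sketch for the model above; the OCIDs are placeholders, and the @init_model_state_from_kwargs decorator routes the keyword arguments through the property setters:

# Illustrative only: all OCIDs below are placeholders.
summary = OperatorControlAssignmentSummary(
    id="ocid1.opctlassignment.oc1..aaaa",
    operator_control_id="ocid1.opctl.oc1..bbbb",
    resource_id="ocid1.instance.oc1..cccc",
    compartment_id="ocid1.compartment.oc1..dddd",
    lifecycle_state=OperatorControlAssignmentSummary.LIFECYCLE_STATE_APPLIED,
    is_enforced_always=True,
)
print(summary)  # __repr__ renders the state as a formatted flat dict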
luminoth/models/ssd/proposal.py
jsdussanc/luminoth
2,584
132557
import sonnet as snt
import tensorflow as tf

from luminoth.utils.bbox_transform_tf import decode, clip_boxes, change_order


class SSDProposal(snt.AbstractModule):
    """Transforms anchors and SSD predictions into object proposals.

    Using the fixed anchors and the SSD predictions for both classification
    and regression (adjusting the bounding box), we return a list of
    proposals with their assigned class.

    In the process it tries to remove duplicated suggestions by applying
    non-maximum suppression (NMS). We apply NMS because of the way object
    detectors are usually scored: duplicated detections (multiple detections
    that overlap the same ground truth value) count as false positives. It
    is reasonable to assume there may be cases where applying NMS is
    completely unnecessary.

    Besides applying NMS it also filters the top N results, both per class
    and in general. These values are easily modifiable in the configuration
    files.
    """

    def __init__(self, num_classes, config, variances, name='proposal_layer'):
        super(SSDProposal, self).__init__(name=name)
        self._num_classes = num_classes

        # Threshold to use for NMS.
        self._class_nms_threshold = config.class_nms_threshold
        # Max number of proposal detections per class.
        self._class_max_detections = config.class_max_detections
        # Maximum number of detections to return.
        self._total_max_detections = config.total_max_detections
        self._min_prob_threshold = config.min_prob_threshold or 0.0

        self._filter_outside_anchors = config.filter_outside_anchors
        self._variances = variances

    def _build(self, cls_prob, loc_pred, all_anchors, im_shape):
        """
        Args:
            cls_prob: A softmax probability for each anchor where the idx = 0
                is the background class (which we should ignore).
                Shape (total_anchors, num_classes + 1)
            loc_pred: A Tensor with the regression output for each anchor.
                Its shape should be (total_anchors, 4).
            all_anchors: A Tensor with the anchors bounding boxes of shape
                (total_anchors, 4), having (x_min, y_min, x_max, y_max) for
                each anchor.
            im_shape: A Tensor with the image shape in format
                (height, width).

        Returns:
            prediction_dict with the following keys:
                raw_proposals: The raw proposals, i.e. the anchors adjusted
                    using loc_pred.
                proposals: The proposals of the network after applying some
                    filters like negative area, and NMS. Its shape is
                    (final_num_proposals, 4), where final_num_proposals is
                    unknown before-hand (it depends on NMS).
                    The 4-length Tensor for each corresponds to:
                    (x_min, y_min, x_max, y_max).
                proposal_label: Its shape is (final_num_proposals,)
                proposal_label_prob: Its shape is (final_num_proposals,)
        """
        selected_boxes = []
        selected_probs = []
        selected_labels = []
        selected_anchors = []  # For debugging

        for class_id in range(self._num_classes):
            # Get the confidences for this class (+ 1 is to ignore background)
            class_cls_prob = cls_prob[:, class_id + 1]

            # Filter by min_prob_threshold
            min_prob_filter = tf.greater_equal(
                class_cls_prob, self._min_prob_threshold)
            class_cls_prob = tf.boolean_mask(class_cls_prob, min_prob_filter)
            class_loc_pred = tf.boolean_mask(loc_pred, min_prob_filter)
            anchors = tf.boolean_mask(all_anchors, min_prob_filter)

            # Using the loc_pred and the anchors, we generate the proposals.
            raw_proposals = decode(anchors, class_loc_pred, self._variances)

            # Clip boxes to image.
            clipped_proposals = clip_boxes(raw_proposals, im_shape)

            # Filter proposals that have a non-valid area.
            (x_min, y_min, x_max, y_max) = tf.unstack(
                clipped_proposals, axis=1)
            proposal_filter = tf.greater(
                tf.maximum(x_max - x_min, 0.) * tf.maximum(y_max - y_min, 0.),
                0.
            )

            class_proposals = tf.boolean_mask(
                clipped_proposals, proposal_filter)
            class_loc_pred = tf.boolean_mask(
                class_loc_pred, proposal_filter)
            class_cls_prob = tf.boolean_mask(
                class_cls_prob, proposal_filter)
            proposal_anchors = tf.boolean_mask(
                anchors, proposal_filter)

            # Log results of filtering non-valid area proposals
            total_anchors = tf.shape(all_anchors)[0]
            total_proposals = tf.shape(class_proposals)[0]
            total_raw_proposals = tf.shape(raw_proposals)[0]

            # Number of proposals dropped by the area filter (raw minus
            # surviving), and the fraction of anchors that survived.
            tf.summary.scalar(
                'invalid_proposals',
                total_raw_proposals - total_proposals, ['ssd']
            )
            tf.summary.scalar(
                'valid_proposals_ratio',
                tf.cast(total_proposals, tf.float32) /
                tf.cast(total_anchors, tf.float32), ['ssd']
            )

            # We have to use TensorFlow's bounding box convention to use
            # the included function for NMS.
            # After gathering results we should normalize it back.
            class_proposal_tf = change_order(class_proposals)

            # Apply class NMS.
            class_selected_idx = tf.image.non_max_suppression(
                class_proposal_tf, class_cls_prob,
                self._class_max_detections,
                iou_threshold=self._class_nms_threshold
            )

            # Using NMS resulting indices, gather values from Tensors.
            class_proposal_tf = tf.gather(
                class_proposal_tf, class_selected_idx)
            class_cls_prob = tf.gather(class_cls_prob, class_selected_idx)
            # Keep anchors aligned with the NMS-filtered proposals, so the
            # per-class lists concatenated below all have the same length.
            proposal_anchors = tf.gather(
                proposal_anchors, class_selected_idx)

            # We append values to a regular list which will later be
            # transformed to a proper Tensor.
            selected_boxes.append(class_proposal_tf)
            selected_probs.append(class_cls_prob)
            # In the case of the class_id, since it is a loop on classes, we
            # already have a fixed class_id. We use `tf.tile` to create that
            # Tensor with the total number of indices returned by the NMS.
            selected_labels.append(
                tf.tile([class_id], [tf.shape(class_selected_idx)[0]])
            )
            selected_anchors.append(proposal_anchors)

        # We use concat (axis=0) to generate a Tensor where the rows are
        # stacked on top of each other
        proposals_tf = tf.concat(selected_boxes, axis=0)
        # Return to the original convention.
        proposals = change_order(proposals_tf)
        proposal_label = tf.concat(selected_labels, axis=0)
        proposal_label_prob = tf.concat(selected_probs, axis=0)
        proposal_anchors = tf.concat(selected_anchors, axis=0)

        # Get topK detections of all classes.
        k = tf.minimum(
            self._total_max_detections,
            tf.shape(proposal_label_prob)[0]
        )

        top_k = tf.nn.top_k(proposal_label_prob, k=k)

        top_k_proposal_label_prob = top_k.values
        top_k_proposals = tf.gather(proposals, top_k.indices)
        top_k_proposal_label = tf.gather(proposal_label, top_k.indices)
        top_k_proposal_anchors = tf.gather(proposal_anchors, top_k.indices)

        return {
            'objects': top_k_proposals,
            'labels': top_k_proposal_label,
            'probs': top_k_proposal_label_prob,
            'raw_proposals': raw_proposals,
            'anchors': top_k_proposal_anchors,
        }
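
The record above leans on TensorFlow's (y_min, x_min, y_max, x_max) box convention for the NMS step. A small hedged, self-contained sketch of that step on dummy data (TF 2.x eager mode; not part of the dataset record):

import tensorflow as tf

# Two heavily-overlapping boxes and one distinct box, already in
# TensorFlow's (y_min, x_min, y_max, x_max) order.
boxes = tf.constant([
    [0.0, 0.0, 1.0, 1.0],
    [0.05, 0.05, 1.0, 1.0],
    [2.0, 2.0, 3.0, 3.0],
])
scores = tf.constant([0.9, 0.8, 0.7])

keep = tf.image.non_max_suppression(
    boxes, scores, max_output_size=10, iou_threshold=0.6)
print(keep.numpy())  # -> [0 2]: the duplicated box is suppressed
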
grr/server/grr_response_server/output_plugin.py
khanhgithead/grr
4,238
132563
#!/usr/bin/env python # Lint as: python3 """Output plugins used by flows and hunts for results exports.""" import abc import threading from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import protodict as rdf_protodict from grr_response_core.lib.rdfvalues import structs as rdf_structs from grr_response_core.lib.registry import OutputPluginRegistry from grr_response_proto import output_plugin_pb2 from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin class OutputPluginBatchProcessingStatus(rdf_structs.RDFProtoStruct): """Describes processing status of a single batch by a hunt output plugin.""" protobuf = output_plugin_pb2.OutputPluginBatchProcessingStatus rdf_deps = [ rdf_output_plugin.OutputPluginDescriptor, ] class Error(Exception): """Output plugins-related exception.""" class PluginDoesNotProduceOutputStreams(Error): """Raised when output streams API is used on plugins not supporting them.""" class OutputPlugin(metaclass=OutputPluginRegistry): """The base class for output plugins. Plugins process responses incrementally in small batches. Every batch is processed via ProcessResponses() calls, which may be issued in parallel for better performance. Then a single Flush() call is made. Next batch of results may potentially be processed on a different worker, therefore plugin's permanent state is stored in "state" attribute. """ __abstract = True # pylint: disable=g-bad-name name = "" description = "" args_type = None @classmethod def CreatePluginAndDefaultState(cls, source_urn=None, args=None): """Creates a plugin and returns its initial state.""" state = rdf_protodict.AttributedDict() state["source_urn"] = source_urn if args is not None: args.Validate() state["args"] = args plugin = cls(source_urn=source_urn, args=args) plugin.InitializeState(state) return plugin, state def __init__(self, source_urn=None, args=None): """OutputPlugin constructor. Constructor should be overridden to maintain instance-local state - i.e. state that gets accumulated during the single output plugin run and that should be used to update the global state via UpdateState method. Args: source_urn: URN of the data source to process the results from. args: This plugin's arguments. """ self.source_urn = source_urn self.args = args self.lock = threading.RLock() def InitializeState(self, state): """Initializes the state the output plugin can use later. InitializeState() is called only once per plugin's lifetime. It will be called when hunt or flow is created. It should be used to register state variables. It's called on the worker, so no security checks apply. Args: state: rdf_protodict.AttributedDict to be filled with default values. """ @abc.abstractmethod def ProcessResponses(self, state, responses): """Processes bunch of responses. When responses are processed, multiple ProcessResponses() calls can be done in a row. ProcessResponse() calls may be parallelized within the same worker to improve output performance, therefore ProcessResponses() implementation should be thread-safe. ProcessResponse() calls are *always* followed by a single Flush() call on the same worker. ProcessResponses() is called on the worker, so no security checks apply. Args: state: rdf_protodict.AttributedDict with plugin's state. NOTE: ProcessResponses should not change state object. All such changes should take place in the UpdateState method (see below). responses: GrrMessages from the hunt results collection. """ def Flush(self, state): """Flushes the output plugin's state. 
    Flush is *always* called after a series of ProcessResponses() calls.
    Flush() is called on the worker, so no security checks apply.

    NOTE: This method doesn't have to be thread-safe as it's called once
    after a series of ProcessResponses() calls is complete.

    Args:
      state: rdf_protodict.AttributedDict with plugin's state. NOTE:
        ProcessResponses should not change state object. All such changes
        should take place in the UpdateState method (see below).
    """

  def UpdateState(self, state):
    """Updates state of the output plugin.

    UpdateState is called after a series of ProcessResponses() calls and
    after a Flush() call.

    The implementation of this method should be lightweight, since it is
    guaranteed to be called atomically in the middle of a database
    transaction.

    Args:
      state: rdf_protodict.AttributedDict with plugin's state to be updated.
    """


class UnknownOutputPlugin(OutputPlugin):
  """Stub plugin used when original plugin class can't be found."""

  name = "unknown"
  description = "Original plugin class couldn't be found."
  args_type = rdfvalue.RDFBytes

  # Match the abstract ProcessResponses(state, responses) signature.
  def ProcessResponses(self, state, responses):
    pass
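
A hedged sketch of the plugin lifecycle described above: a counting plugin that accumulates in instance-local state during ProcessResponses() and merges into the persistent state in UpdateState(). Class and field names below are illustrative, not part of GRR:

class CountingOutputPlugin(OutputPlugin):
  """Illustrative only: counts responses across batches."""

  name = "counting_example"  # hypothetical plugin name
  description = "Counts processed responses."

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._batch_count = 0  # instance-local, per-run state

  def InitializeState(self, state):
    state["total_count"] = 0  # persistent state, registered once

  def ProcessResponses(self, state, responses):
    with self.lock:  # ProcessResponses may run in parallel threads
      self._batch_count += len(responses)

  def UpdateState(self, state):
    state["total_count"] += self._batch_count
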
torchdyn/models/hybrid.py
iisabeller/torchdyn
825
132575
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"Experimental API for hybrid Neural DEs and continuous models applied to sequences -> [ODE-RNN, Neural CDE]"
import math

import torch
import torch.nn as nn
from torch.distributions import Normal, kl_divergence
import pytorch_lightning as pl
import torchsde

# NeuralSDE is required as a base class for LatentNeuralSDE below;
# it was missing from the original import line.
from torchdyn.models import LSDEFunc, NeuralSDE


class HybridNeuralDE(nn.Module):
    def __init__(self, flow, jump, out, last_output=True, reverse=False):
        """ODE-RNN / LSTM / GRU"""
        super().__init__()
        self.flow, self.jump, self.out = flow, jump, out
        self.reverse, self.last_output = reverse, last_output

        # determine type of `jump` func
        # jump can be of two types:
        # either take hidden and element of sequence (e.g RNNCell)
        # or h, x_t and c (LSTMCell). Custom implementation assumes call
        # signature of type (x_t, h) and .hidden_size property
        if type(jump) == nn.modules.rnn.LSTMCell:
            self.jump_func = self._jump_latent_cell
        else:
            self.jump_func = self._jump_latent

    def forward(self, x):
        h = c = self._init_latent(x)
        Y = torch.zeros(x.shape[0], *h.shape).to(x)
        if self.reverse:
            # flip the sequence itself; `x_t` is not bound yet at this point
            x = x.flip(0)
        for t, x_t in enumerate(x):
            h, c = self.jump_func(x_t, h, c)
            h = self.flow(h)
            Y[t] = h
        Y = self.out(Y)
        return Y[-1] if self.last_output else Y

    def _init_latent(self, x):
        x = x[0]
        return torch.zeros(x.shape[0], self.jump.hidden_size).to(x.device)

    def _jump_latent(self, *args):
        x_t, h, c = args[:3]
        return self.jump(x_t, h), c

    def _jump_latent_cell(self, *args):
        x_t, h, c = args[:3]
        return self.jump(x_t, (h, c))


class LatentNeuralSDE(NeuralSDE, pl.LightningModule):  # pragma: no cover
    def __init__(self, post_drift, diffusion, prior_drift, sigma, theta, mu,
                 options, noise_type, order, sensitivity, s_span, solver,
                 atol, rtol, intloss):
        """Latent Neural SDEs."""
        super().__init__(drift_func=post_drift, diffusion_func=diffusion,
                         noise_type=noise_type, order=order,
                         sensitivity=sensitivity, s_span=s_span,
                         solver=solver, atol=atol, rtol=rtol,
                         intloss=intloss)
        self.defunc = LSDEFunc(f=post_drift, g=diffusion, h=prior_drift)
        self.defunc.noise_type, self.defunc.sde_type = noise_type, 'ito'
        self.options = options

        # p(y0).
        logvar = math.log(sigma ** 2. / (2. * theta))
        self.py0_mean = nn.Parameter(torch.tensor([[mu]]), requires_grad=False)
        self.py0_logvar = nn.Parameter(torch.tensor([[logvar]]), requires_grad=False)

        # q(y0).
        self.qy0_mean = nn.Parameter(torch.tensor([[mu]]), requires_grad=True)
        self.qy0_logvar = nn.Parameter(torch.tensor([[logvar]]), requires_grad=True)

    def forward(self, eps: torch.Tensor, s_span=None):
        eps = eps.to(self.qy0_std)
        x0 = self.qy0_mean + eps * self.qy0_std

        qy0 = Normal(loc=self.qy0_mean, scale=self.qy0_std)
        py0 = Normal(loc=self.py0_mean, scale=self.py0_std)
        logqp0 = kl_divergence(qy0, py0).sum(1).mean(0)  # KL(time=0).
if s_span is not None: s_span_ext = s_span else: s_span_ext = self.s_span.cpu() zs, logqp = torchsde.sdeint(sde=self.defunc, x0=x0, s_span=s_span_ext, rtol=self.rtol, atol=self.atol, logqp=True, options=self.options, adaptive=self.adaptive, method=self.solver) logqp = logqp.sum(0).mean(0) log_ratio = logqp0 + logqp # KL(time=0) + KL(path). return zs, log_ratio def sample_p(self, vis_span, n_sim, eps=None, bm=None, dt=0.01): eps = torch.randn(n_sim, 1).to(self.py0_mean).to(self.device) if eps is None else eps y0 = self.py0_mean + eps.to(self.device) * self.py0_std return torchsde.sdeint(self.defunc, y0, vis_span, bm=bm, method='srk', dt=dt, names={'drift': 'h'}) def sample_q(self, vis_span, n_sim, eps=None, bm=None, dt=0.01): eps = torch.randn(n_sim, 1).to(self.qy0_mean) if eps is None else eps y0 = self.qy0_mean + eps.to(self.device) * self.qy0_std return torchsde.sdeint(self.defunc, y0, vis_span, bm=bm, method='srk', dt=dt) @property def py0_std(self): return torch.exp(.5 * self.py0_logvar) @property def qy0_std(self): return torch.exp(.5 * self.qy0_logvar)
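
A hedged usage sketch for HybridNeuralDE above, with an identity flow standing in for a Neural ODE block. It assumes only torch plus the class from this record; shapes follow the (seq_len, batch, features) iteration in forward():

import torch
import torch.nn as nn

seq_len, batch, n_feat, n_hidden = 12, 4, 3, 16
jump = nn.GRUCell(n_feat, n_hidden)  # provides .hidden_size and the (x_t, h) call signature
flow = nn.Identity()                 # stand-in for an ODE flow acting on the latent state
out = nn.Linear(n_hidden, 1)

model = HybridNeuralDE(flow, jump, out, last_output=True)
x = torch.randn(seq_len, batch, n_feat)
y = model(x)
print(y.shape)  # torch.Size([4, 1])
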
paddlespeech/s2t/training/extensions/evaluator.py
JiehangXie/PaddleSpeech
1,540
132608
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from chainer(https://github.com/chainer/chainer)
from typing import Dict

import paddle
from paddle import distributed as dist
from paddle.io import DataLoader
from paddle.nn import Layer

from . import extension
from ..reporter import DictSummary
from ..reporter import ObsScope
from ..reporter import report
from ..timer import Timer
from paddlespeech.s2t.utils.log import Log

logger = Log(__name__).getlog()


class StandardEvaluator(extension.Extension):

    trigger = (1, 'epoch')
    default_name = 'validation'
    priority = extension.PRIORITY_WRITER

    name = None

    def __init__(self, model: Layer, dataloader: DataLoader):
        # it is designed to hold multiple models
        models = {"main": model}
        self.models: Dict[str, Layer] = models
        self.model = model

        # dataloaders
        self.dataloader = dataloader

    def evaluate_core(self, batch):
        # compute
        self.model(batch)  # you may report here
        return

    def evaluate_sync(self, data):
        # dist sync `evaluate_core` outputs
        if data is None:
            return

        numerator, denominator = data
        if dist.get_world_size() > 1:
            numerator = paddle.to_tensor(numerator)
            denominator = paddle.to_tensor(denominator)
            # the default operator in all_reduce function is sum.
            dist.all_reduce(numerator)
            dist.all_reduce(denominator)
            value = numerator / denominator
            value = float(value)
        else:
            value = numerator / denominator

        # used for `snapshot` to do kbest save.
        report("VALID/LOSS", value)
        logger.info(f"Valid: all-reduce loss {value}")

    def evaluate(self):
        # switch to eval mode
        for model in self.models.values():
            model.eval()

        # to average evaluation metrics
        summary = DictSummary()
        for batch in self.dataloader:
            observation = {}
            with ObsScope(observation):
                # main evaluation computation here.
                with paddle.no_grad():
                    self.evaluate_sync(self.evaluate_core(batch))
            summary.add(observation)
        summary = summary.compute_mean()

        # switch to train mode
        for model in self.models.values():
            model.train()
        return summary

    def __call__(self, trainer=None):
        # evaluate and report the averaged metrics to the current observation.
        # If it is used to extend a trainer, the metrics are reported to the
        # observation of the trainer; otherwise, you can use your own
        # observation.
        with Timer("Eval Time Cost: {}"):
            summary = self.evaluate()
        for k, v in summary.items():
            report(k, v)
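
A hedged sketch of how evaluate_core/evaluate_sync are meant to pair up in the record above: evaluate_core returns a (numerator, denominator) tuple that evaluate_sync all-reduces into an averaged loss. The subclass name and batch layout below are illustrative assumptions:

class LossEvaluator(StandardEvaluator):
    """Illustrative only: reports a batch-size-weighted average loss."""

    def evaluate_core(self, batch):
        feats, labels = batch             # assumed batch layout
        loss = self.model(feats, labels)  # assumed to return a scalar loss
        # numerator / denominator, so evaluate_sync can all-reduce both
        return float(loss) * len(labels), len(labels)
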
ledfx/integrations/qlc.py
WeekendWarrior1/LedFx
524
132610
import asyncio # import numpy as np # import importlib # import pkgutil import logging import aiohttp import voluptuous as vol # from ledfx.events import Event from ledfx.integrations import Integration from ledfx.utils import async_fire_and_forget, resolve_destination # import time # import os # import re _LOGGER = logging.getLogger(__name__) class QLC(Integration): """QLC+ Integration""" _widget_types = ["Button", "Slider", "Audio Triggers"] NAME = "QLC+" DESCRIPTION = "Web Api Integration for Q Light Controller Plus" CONFIG_SCHEMA = vol.Schema( { vol.Required( "name", description="Name of this integration instance and associated settings", default="QLC+", ): str, vol.Required( "description", description="Description of this integration", default="Web Api Integration for Q Light Controller Plus", ): str, vol.Required( "ip_address", description="QLC+ ip address", default="127.0.0.1", ): str, vol.Required( "port", description="QLC+ port", default=9999 ): vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)), } ) def __init__(self, ledfx, config, active, data): super().__init__(ledfx, config, active, data) self._ledfx = ledfx self._config = config self._client = None self._data = [] self._listeners = [] self._connect_task = None self.restore_from_data(data) def restore_from_data(self, data): """Creates the event listeners from saved data""" if data is not None: try: for entry in data: event_type, event_filter, active, qlc_payload = entry self.create_event( event_type, event_filter, active, qlc_payload ) except ValueError: _LOGGER.error("Failed to restore QLC+ settings") def get_events(self): """Get all events in data: [(event_type, event_filter, active, qlc_payload), ...] event_type : type of event, str event_filter : filter for event, dict eg. {"effect_name": "Scroll"} active : whether there is an active listener for this event qlc_payload : the payload that is sent when this event is triggered """ return self._data def create_event(self, event_type, event_filter, active, qlc_payload): """Create or update event listener that sends a qlc payload on a specific event""" # If it exists, remove the existing listener and update data for idx, entry in enumerate(self._data): _event_type, _event_filter, _active, _qlc_payload = entry if (_event_type == event_type) and (_event_filter == event_filter): self._data[idx] = [ event_type, event_filter, active, qlc_payload, ] # if it was active, remove existing listener if _active: self._remove_listener(_event_type, event_filter) break # If it doesn't already exist, add it as a new entry to data else: self.data.append([event_type, event_filter, active, qlc_payload]) # Finally, subscribe to the ledfx event if the listener is now active if active: self._add_listener(event_type, event_filter, qlc_payload) _LOGGER.info( f"QLC+ payload linked to event '{event_type}' with filter {event_filter}" ) def delete_event(self, event_type, event_filter): """Completely delete event listener and saved payload from data""" # remove listener if it exists self._remove_listener(event_type, event_filter) # remove event and payload from data for idx, entry in enumerate(self._data): _event_type, _event_filter, _active, _qlc_payload = entry if (_event_type == event_type) and (_event_filter == event_filter): del self._data[idx] _LOGGER.info( f"QLC+ payload deleted for event '{event_type}' with filter {event_filter}" ) def toggle_event(self, event_type, event_filter): """Toggle a payload linked to event on or off""" # Update "active" flag in data for idx, entry in 
enumerate(self._data): _event_type, _event_filter, _active, _qlc_payload = entry if (_event_type == event_type) and (_event_filter == event_filter): # toggle active flag in data self._data[idx] = [ event_type, event_filter, not _active, _qlc_payload, ] # Enable/disable listener if _active: self._remove_listener(_event_type, event_filter) else: # no listener exists, so create it self._add_listener(event_type, event_filter, _qlc_payload) # log action _LOGGER.info( f"QLC+ payload {'disabled' if _active else 'enabled'} for event '{event_type}' with filter {event_filter}" ) return True # success return False # failed to find event_type with this event_filter def _remove_listener(self, event_type, event_filter): """Internal function to remove ledfx events listener if it exists""" for idx, entry in enumerate(self._listeners): _event_type, _event_filter, listener = entry if (_event_type == event_type) and (_event_filter == event_filter): # Call the listener function that removes the listener listener() del self._listeners[idx] break def _add_listener(self, event_type, event_filter, qlc_payload): """Internal function that links payload to send on the specified event""" def make_callback(qlc_payload): def callback(_): _LOGGER.info( f"QLC+ sent payload, triggered by event '{event_type}' with filter {event_filter}" ) async_fire_and_forget( self._send_payload(qlc_payload), loop=self._ledfx.loop ) return callback callback = make_callback(qlc_payload) listener = self._ledfx.events.add_listener( callback, event_type, event_filter ) # store "listener", a function to remove the listener later if needed self._listeners.append((event_type, event_filter, listener)) async def get_widgets(self): """Returns a list of widgets as tuples: [(ID, Type, Name),...]""" # First get list of widgets (ID, Name) widgets = [] message = "QLC+API|getWidgetsList" response = await self._client.query(message) widgets_list = response.lstrip(f"{message}|").split("|") # Then get the type for each widget (in individual requests bc QLC api be like that) for widget_id, widget_name in zip( widgets_list[::2], widgets_list[1::2] ): message = "QLC+API|getWidgetType" response = await self._client.query(f"{message}|{widget_id}") widget_type = response.lstrip(f"{message}|") if widget_type in self._widget_types: widgets.append((widget_id, widget_type, widget_name)) return widgets async def _send_payload(self, qlc_payload): """Sends payload of {id:value, ...} pairs to QLC""" for widget_id, value in qlc_payload.items(): await self._client.send(f"{int(widget_id)}|{value}") async def connect(self): resolved_ip = resolve_destination(self._config["ip_address"]) domain = f"{resolved_ip }:{self._config['port']}" url = f"http://{domain}/qlcplusWS" if self._client is None: self._client = QLCWebsocketClient(url, domain) self._cancel_connect() self._connect_task = asyncio.create_task(self._client.connect()) if await self._connect_task: await super().connect(f"Connected to QLC+ websocket at {domain}") async def disconnect(self): self._cancel_connect() if self._client is not None: # fire and forget bc for some reason close() never returns... 
            async_fire_and_forget(
                self._client.disconnect(), loop=self._ledfx.loop
            )
            await super().disconnect("Disconnected from QLC+ websocket")
        else:
            await super().disconnect()

    def _cancel_connect(self):
        if self._connect_task is not None:
            self._connect_task.cancel()
            self._connect_task = None


class QLCWebsocketClient(aiohttp.ClientSession):
    def __init__(self, url, domain):
        super().__init__()
        self.websocket = None
        self.url = url
        self.domain = domain

    async def connect(self):
        """Connect to the WebSocket."""
        while True:
            try:
                self.websocket = await self.ws_connect(self.url)
                return True
            except aiohttp.client_exceptions.ClientConnectorError:
                _LOGGER.info(
                    f"Connection to {self.domain} failed. Retrying in 5s..."
                )
                await asyncio.sleep(5)
            except asyncio.CancelledError:
                return False

    async def disconnect(self):
        if self.websocket is not None:
            await self.websocket.close()

    async def begin(self, callback):
        """Connect and indefinitely read from websocket, returning messages to callback func"""
        await self.connect()
        await self.read(callback)

    async def query(self, message):
        """Send a message, and return the response"""
        await self.send(message)
        result = await self.receive()
        return result.lstrip("QLC+API|")

    async def send(self, message):
        """Send a message to the WebSocket."""
        if self.websocket is None:
            _LOGGER.error("Websocket not yet established")
            return
        await self.websocket.send_str(message)
        _LOGGER.debug(f"Sent message {message} to {self.domain}")

    async def receive(self):
        """Receive one message from the WebSocket."""
        if self.websocket is None:
            _LOGGER.error("Websocket not yet established")
            return
        return (await self.websocket.receive()).data

    async def read(self, callback):
        """Read messages from the WebSocket and hand them to the callback."""
        if self.websocket is None:
            _LOGGER.error("Websocket not yet established")
            return
        # Iterate raw messages so their type can be inspected; the
        # plain-string receive() helper above is not suitable here, and the
        # callback is the function argument, not an attribute of self.
        async for message in self.websocket:
            if message.type == aiohttp.WSMsgType.TEXT:
                callback(message.data)
            elif message.type in (
                aiohttp.WSMsgType.CLOSED,
                aiohttp.WSMsgType.ERROR,
            ):
                break
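
The wire format used by _send_payload above is simply `<widget id>|<value>` per widget. A tiny standalone sketch (no LedFx or QLC+ required; the payload values are hypothetical):

# Hypothetical payload mapping QLC+ widget IDs to values, as stored per event.
qlc_payload = {"3": 255, "7": 0}

# Messages exactly as _send_payload would emit them over the websocket.
messages = [f"{int(widget_id)}|{value}" for widget_id, value in qlc_payload.items()]
print(messages)  # ['3|255', '7|0']
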
installer/__main__.py
baptoutiego/Jarvis
2,605
132617
import traceback

try:
    from helper import log_init, log_close
    from unix_windows import IS_WIN

    log_init()

    import os
    os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    import steps.a_setup_virtualenv
    import steps.b_pip
    import steps.c_nltk
    if not IS_WIN:
        # TODO Optional requirements on windows
        import steps.d_optional
    import steps.e_launcher
except SystemExit:
    # Expected Error
    pass
except BaseException:
    print("\n\n")
    print("An unexpected error occurred. Please open an issue on github!")
    print("here is the error:")
    print('')
    traceback.print_exc()
flambe/cluster/__init__.py
ethan-asapp/flambe
148
132621
from flambe.cluster.cluster import Cluster
from flambe.cluster.aws import AWSCluster
from flambe.cluster.ssh import SSHCluster


__all__ = ['Cluster', 'AWSCluster', 'SSHCluster']
tests/oauth2/rfc6749/clients/test_service_application.py
braedon/oauthlib
954
132635
# -*- coding: utf-8 -*- import os from time import time from unittest.mock import patch import jwt from oauthlib.common import Request from oauthlib.oauth2 import ServiceApplicationClient from tests.unittest import TestCase class ServiceApplicationClientTest(TestCase): gt = ServiceApplicationClient.grant_type private_key = """ -----<KEY> """ public_key = """ -----<KEY> """ subject = '<EMAIL>' issuer = '<EMAIL>' audience = 'https://provider.com/token' client_id = "someclientid" scope = ["/profile"] kwargs = { "some": "providers", "require": "extra arguments" } body = "isnot=empty" body_up = "not=empty&grant_type=%s" % gt body_kwargs = body_up + "&some=providers&require=extra+arguments" token_json = ('{ "access_token":"<KEY>",' ' "token_type":"example",' ' "expires_in":3600,' ' "scope":"/profile",' ' "example_parameter":"example_value"}') token = { "access_token": "2<PASSWORD>", "token_type": "example", "expires_in": 3600, "scope": ["/profile"], "example_parameter": "example_value" } @patch('time.time') def test_request_body(self, t): t.return_value = time() self.token['expires_at'] = self.token['expires_in'] + t.return_value client = ServiceApplicationClient( self.client_id, private_key=self.private_key) # Basic with min required params body = client.prepare_request_body(issuer=self.issuer, subject=self.subject, audience=self.audience, body=self.body) r = Request('https://a.b', body=body) self.assertEqual(r.isnot, 'empty') self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type) claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256']) self.assertEqual(claim['iss'], self.issuer) # audience verification is handled during decode now self.assertEqual(claim['sub'], self.subject) self.assertEqual(claim['iat'], int(t.return_value)) self.assertNotIn('nbf', claim) self.assertNotIn('jti', claim) # Missing issuer parameter self.assertRaises(ValueError, client.prepare_request_body, issuer=None, subject=self.subject, audience=self.audience, body=self.body) # Missing subject parameter self.assertRaises(ValueError, client.prepare_request_body, issuer=self.issuer, subject=None, audience=self.audience, body=self.body) # Missing audience parameter self.assertRaises(ValueError, client.prepare_request_body, issuer=self.issuer, subject=self.subject, audience=None, body=self.body) # Optional kwargs not_before = time() - 3600 jwt_id = '8<PASSWORD>35f43sd' body = client.prepare_request_body(issuer=self.issuer, subject=self.subject, audience=self.audience, body=self.body, not_before=not_before, jwt_id=jwt_id) r = Request('https://a.b', body=body) self.assertEqual(r.isnot, 'empty') self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type) claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256']) self.assertEqual(claim['iss'], self.issuer) # audience verification is handled during decode now self.assertEqual(claim['sub'], self.subject) self.assertEqual(claim['iat'], int(t.return_value)) self.assertEqual(claim['nbf'], not_before) self.assertEqual(claim['jti'], jwt_id) @patch('time.time') def test_request_body_no_initial_private_key(self, t): t.return_value = time() self.token['expires_at'] = self.token['expires_in'] + t.return_value client = ServiceApplicationClient( self.client_id, private_key=None) # Basic with private key provided body = client.prepare_request_body(issuer=self.issuer, subject=self.subject, audience=self.audience, body=self.body, private_key=self.private_key) r = Request('https://a.b', body=body) 
self.assertEqual(r.isnot, 'empty') self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type) claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256']) self.assertEqual(claim['iss'], self.issuer) # audience verification is handled during decode now self.assertEqual(claim['sub'], self.subject) self.assertEqual(claim['iat'], int(t.return_value)) # No private key provided self.assertRaises(ValueError, client.prepare_request_body, issuer=self.issuer, subject=self.subject, audience=self.audience, body=self.body) @patch('time.time') def test_parse_token_response(self, t): t.return_value = time() self.token['expires_at'] = self.token['expires_in'] + t.return_value client = ServiceApplicationClient(self.client_id) # Parse code and state response = client.parse_request_body_response(self.token_json, scope=self.scope) self.assertEqual(response, self.token) self.assertEqual(client.access_token, response.get("access_token")) self.assertEqual(client.refresh_token, response.get("refresh_token")) self.assertEqual(client.token_type, response.get("token_type")) # Mismatching state self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid") os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '2' token = client.parse_request_body_response(self.token_json, scope="invalid") self.assertTrue(token.scope_changed) del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
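
A hedged sketch of the JWT-bearer flow these tests exercise, using a throwaway RSA key instead of the redacted key material above. It assumes oauthlib, PyJWT and cryptography are installed; the issuer/subject/audience values are illustrative:

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from oauthlib.oauth2 import ServiceApplicationClient

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.TraditionalOpenSSL,
    serialization.NoEncryption(),
).decode()

client = ServiceApplicationClient("someclientid", private_key=pem)
body = client.prepare_request_body(
    issuer="issuer@example.com",
    subject="subject@example.com",
    audience="https://provider.com/token",
)
# The body carries the signed assertion plus the JWT-bearer grant type.
print("assertion=" in body and "grant_type=" in body)  # True
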
esphome/components/tuya/light/__init__.py
OttoWinter/esphomeyaml
249
132645
from esphome.components import light import esphome.config_validation as cv import esphome.codegen as cg from esphome.const import ( CONF_OUTPUT_ID, CONF_MIN_VALUE, CONF_MAX_VALUE, CONF_GAMMA_CORRECT, CONF_DEFAULT_TRANSITION_LENGTH, CONF_SWITCH_DATAPOINT, CONF_COLD_WHITE_COLOR_TEMPERATURE, CONF_WARM_WHITE_COLOR_TEMPERATURE, CONF_COLOR_INTERLOCK, ) from .. import tuya_ns, CONF_TUYA_ID, Tuya DEPENDENCIES = ["tuya"] CONF_DIMMER_DATAPOINT = "dimmer_datapoint" CONF_MIN_VALUE_DATAPOINT = "min_value_datapoint" CONF_COLOR_TEMPERATURE_DATAPOINT = "color_temperature_datapoint" CONF_COLOR_TEMPERATURE_INVERT = "color_temperature_invert" CONF_COLOR_TEMPERATURE_MAX_VALUE = "color_temperature_max_value" CONF_RGB_DATAPOINT = "rgb_datapoint" CONF_HSV_DATAPOINT = "hsv_datapoint" TuyaLight = tuya_ns.class_("TuyaLight", light.LightOutput, cg.Component) CONFIG_SCHEMA = cv.All( light.BRIGHTNESS_ONLY_LIGHT_SCHEMA.extend( { cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(TuyaLight), cv.GenerateID(CONF_TUYA_ID): cv.use_id(Tuya), cv.Optional(CONF_DIMMER_DATAPOINT): cv.uint8_t, cv.Optional(CONF_MIN_VALUE_DATAPOINT): cv.uint8_t, cv.Optional(CONF_SWITCH_DATAPOINT): cv.uint8_t, cv.Exclusive(CONF_RGB_DATAPOINT, "color"): cv.uint8_t, cv.Exclusive(CONF_HSV_DATAPOINT, "color"): cv.uint8_t, cv.Optional(CONF_COLOR_INTERLOCK, default=False): cv.boolean, cv.Inclusive( CONF_COLOR_TEMPERATURE_DATAPOINT, "color_temperature" ): cv.uint8_t, cv.Optional(CONF_COLOR_TEMPERATURE_INVERT, default=False): cv.boolean, cv.Optional(CONF_MIN_VALUE): cv.int_, cv.Optional(CONF_MAX_VALUE): cv.int_, cv.Optional(CONF_COLOR_TEMPERATURE_MAX_VALUE): cv.int_, cv.Inclusive( CONF_COLD_WHITE_COLOR_TEMPERATURE, "color_temperature" ): cv.color_temperature, cv.Inclusive( CONF_WARM_WHITE_COLOR_TEMPERATURE, "color_temperature" ): cv.color_temperature, # Change the default gamma_correct and default transition length settings. # The Tuya MCU handles transitions and gamma correction on its own. 
cv.Optional(CONF_GAMMA_CORRECT, default=1.0): cv.positive_float, cv.Optional( CONF_DEFAULT_TRANSITION_LENGTH, default="0s" ): cv.positive_time_period_milliseconds, } ).extend(cv.COMPONENT_SCHEMA), cv.has_at_least_one_key( CONF_DIMMER_DATAPOINT, CONF_SWITCH_DATAPOINT, CONF_RGB_DATAPOINT, CONF_HSV_DATAPOINT, ), ) async def to_code(config): var = cg.new_Pvariable(config[CONF_OUTPUT_ID]) await cg.register_component(var, config) await light.register_light(var, config) if CONF_DIMMER_DATAPOINT in config: cg.add(var.set_dimmer_id(config[CONF_DIMMER_DATAPOINT])) if CONF_MIN_VALUE_DATAPOINT in config: cg.add(var.set_min_value_datapoint_id(config[CONF_MIN_VALUE_DATAPOINT])) if CONF_SWITCH_DATAPOINT in config: cg.add(var.set_switch_id(config[CONF_SWITCH_DATAPOINT])) if CONF_RGB_DATAPOINT in config: cg.add(var.set_rgb_id(config[CONF_RGB_DATAPOINT])) elif CONF_HSV_DATAPOINT in config: cg.add(var.set_hsv_id(config[CONF_HSV_DATAPOINT])) if CONF_COLOR_TEMPERATURE_DATAPOINT in config: cg.add(var.set_color_temperature_id(config[CONF_COLOR_TEMPERATURE_DATAPOINT])) cg.add(var.set_color_temperature_invert(config[CONF_COLOR_TEMPERATURE_INVERT])) cg.add( var.set_cold_white_temperature(config[CONF_COLD_WHITE_COLOR_TEMPERATURE]) ) cg.add( var.set_warm_white_temperature(config[CONF_WARM_WHITE_COLOR_TEMPERATURE]) ) if CONF_MIN_VALUE in config: cg.add(var.set_min_value(config[CONF_MIN_VALUE])) if CONF_MAX_VALUE in config: cg.add(var.set_max_value(config[CONF_MAX_VALUE])) if CONF_COLOR_TEMPERATURE_MAX_VALUE in config: cg.add( var.set_color_temperature_max_value( config[CONF_COLOR_TEMPERATURE_MAX_VALUE] ) ) cg.add(var.set_color_interlock(config[CONF_COLOR_INTERLOCK])) paren = await cg.get_variable(config[CONF_TUYA_ID]) cg.add(var.set_tuya_parent(paren))
tests/integration/hdf5/test_scratch.py
weiglszonja/pynwb
132
132655
import pandas as pd
import numpy as np
from numpy.testing import assert_array_equal

from pynwb import NWBFile, NWBHDF5IO, TimeSeries
from pynwb.core import ScratchData
from pynwb.testing import NWBH5IOMixin, TestCase


class TestScratchDataIO(NWBH5IOMixin, TestCase):

    def setUpContainer(self):
        """ Return the test ScratchData to read/write """
        return ScratchData(name='foo', data=[1, 2, 3, 4], description='test scratch')

    def addContainer(self, nwbfile):
        """ Add the test ScratchData to the given NWBFile """
        nwbfile.add_scratch(self.container)

    def getContainer(self, nwbfile):
        """ Return the test ScratchData from the given NWBFile """
        return nwbfile.get_scratch('foo', convert=False)

    def roundtrip_scratch(self, data, case, **kwargs):
        self.filename = 'test_scratch_%s.nwb' % case
        description = 'a file to test writing and reading a scratch data of type %s' % case
        identifier = 'TEST_scratch_%s' % case
        nwbfile = NWBFile(session_description=description,
                          identifier=identifier,
                          session_start_time=self.start_time,
                          file_create_date=self.create_date)
        nwbfile.add_scratch(data, name='foo', **kwargs)

        self.writer = NWBHDF5IO(self.filename, mode='w')
        self.writer.write(nwbfile)
        self.writer.close()

        self.reader = NWBHDF5IO(self.filename, mode='r')
        self.read_nwbfile = self.reader.read()
        return self.read_nwbfile.get_scratch('foo')

    def test_scratch_convert_int(self):
        data = 2
        ret = self.roundtrip_scratch(data, 'int', description='test scratch')
        self.assertEqual(data, ret)
        self.validate()

    def test_scratch_convert_list(self):
        data = [1, 2, 3, 4]
        ret = self.roundtrip_scratch(data, 'list', description='test scratch')
        assert_array_equal(data, ret)
        self.validate()

    def test_scratch_convert_ndarray(self):
        data = np.array([1, 2, 3, 4])
        ret = self.roundtrip_scratch(data, 'ndarray', description='test scratch')
        assert_array_equal(data, ret)
        self.validate()

    def test_scratch_convert_DataFrame_table_desc(self):
        """Test round trip convert of DataFrame with a table description"""
        data = pd.DataFrame(data={'col1': [1, 2, 3, 4], 'col2': ['a', 'b', 'c', 'd']})
        self.roundtrip_scratch(data, 'DataFrame', description='my_table')
        ret = self.read_nwbfile.get_scratch('foo', convert=False)
        ret_df = ret.to_dataframe()
        self.assertEqual(ret.description, 'my_table')
        assert_array_equal(data.values, ret_df.values)
        assert_array_equal(data.index.values, ret_df.index.values)
        self.validate()

    def test_scratch_container(self):
        data = TimeSeries(
            name='test_ts', data=[1, 2, 3, 4, 5], unit='unit',
            timestamps=[1.1, 1.2, 1.3, 1.4, 1.5]
        )
        nwbfile = NWBFile(
            session_description='test', identifier='test',
            session_start_time=self.start_time,
            file_create_date=self.create_date
        )
        nwbfile.add_scratch(data)

        self.writer = NWBHDF5IO(self.filename, mode='w')
        self.writer.write(nwbfile)
        self.writer.close()

        self.reader = NWBHDF5IO(self.filename, mode='r')
        self.read_nwbfile = self.reader.read()
        ret = self.read_nwbfile.get_scratch('test_ts')
        self.assertContainerEqual(data, ret)
        self.validate()
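
A hedged minimal sketch of the scratch-space API the tests above exercise, outside the test harness (assumes only pynwb and numpy; the filename and identifiers are illustrative):

from datetime import datetime

import numpy as np
from pynwb import NWBFile, NWBHDF5IO

nwbfile = NWBFile(session_description='scratch demo', identifier='demo',
                  session_start_time=datetime.now().astimezone())
nwbfile.add_scratch(np.array([1, 2, 3, 4]), name='foo', description='demo scratch')

with NWBHDF5IO('scratch_demo.nwb', mode='w') as io:
    io.write(nwbfile)

with NWBHDF5IO('scratch_demo.nwb', mode='r') as io:
    print(io.read().get_scratch('foo'))  # -> [1 2 3 4]
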
office365/sharepoint/logger/logFileInfoCollection.py
rikeshtailor/Office365-REST-Python-Client
544
132709
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.logger.logFileInfo import LogFileInfo


class LogFileInfoCollection(BaseEntityCollection):

    def __init__(self, context, resource_path=None):
        super(LogFileInfoCollection, self).__init__(context, LogFileInfo, resource_path)
tests/qnn/conftest.py
ryanlevy/pennylane
539
132724
# Copyright 2018-2020 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Common fixtures for the qnn module. """ import pytest import pennylane as qml import numpy as np @pytest.fixture def get_circuit(n_qubits, output_dim, interface): """Fixture for getting a sample quantum circuit with a controllable qubit number and output dimension. Returns both the circuit and the shape of the weights.""" dev = qml.device("default.qubit", wires=n_qubits) weight_shapes = { "w1": (3, n_qubits, 3), "w2": (1,), "w3": 1, "w4": [3], "w5": (2, n_qubits, 3), "w6": 3, "w7": 0, } @qml.qnode(dev, interface=interface) def circuit(inputs, w1, w2, w3, w4, w5, w6, w7): """A circuit that embeds data using the AngleEmbedding and then performs a variety of operations. The output is a PauliZ measurement on the first output_dim qubits. One set of parameters, w5, are specified as non-trainable.""" qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits))) qml.templates.StronglyEntanglingLayers(w1, wires=list(range(n_qubits))) qml.RX(w2[0], wires=0 % n_qubits) qml.RX(w3, wires=1 % n_qubits) qml.Rot(*w4, wires=2 % n_qubits) qml.templates.StronglyEntanglingLayers(w5, wires=list(range(n_qubits))) qml.Rot(*w6, wires=3 % n_qubits) qml.RX(w7, wires=4 % n_qubits) return [qml.expval(qml.PauliZ(i)) for i in range(output_dim)] return circuit, weight_shapes @pytest.fixture def get_circuit_dm(n_qubits, output_dim, interface): """Fixture for getting a sample quantum circuit with a controllable qubit number and output dimension for density matrix return type. Returns both the circuit and the shape of the weights.""" dev = qml.device("default.qubit", wires=n_qubits) weight_shapes = { "w1": (3, n_qubits, 3), "w2": (1,), "w3": 1, "w4": [3], "w5": (2, n_qubits, 3), "w6": 3, "w7": 0, } @qml.qnode(dev, interface=interface) def circuit(inputs, w1, w2, w3, w4, w5, w6, w7): """Sample circuit to be used for testing density_matrix() return type.""" qml.templates.AngleEmbedding(inputs, wires=list(range(n_qubits))) qml.templates.StronglyEntanglingLayers(w1, wires=list(range(n_qubits))) qml.RX(w2[0], wires=0 % n_qubits) qml.RX(w3, wires=1 % n_qubits) qml.Rot(*w4, wires=2 % n_qubits) qml.templates.StronglyEntanglingLayers(w5, wires=list(range(n_qubits))) qml.Rot(*w6, wires=3 % n_qubits) qml.RX(w7, wires=4 % n_qubits) # Using np.log2() here because output_dim is sampled from varying the number of # qubits (say, nq) and calculated as (2 ** nq, 2 ** nq) return qml.density_matrix(wires=[i for i in range(int(np.log2(output_dim[0])))]) return circuit, weight_shapes
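
A hedged sketch of how the weight_shapes dicts above translate into concrete arrays when feeding such a circuit by hand (plain NumPy; treating an integer shape of 0 as a single scalar angle is an assumption here, mirroring how the circuits consume w3/w7 directly in qml.RX):

import numpy as np

weight_shapes = {"w1": (3, 4, 3), "w2": (1,), "w3": 1, "w4": [3],
                 "w5": (2, 4, 3), "w6": 3, "w7": 0}

def make_weight(shape):
    # assumed convention: 0 means a bare scalar parameter
    if shape == 0:
        return np.random.random()
    return np.random.random(shape)

weights = {name: make_weight(shape) for name, shape in weight_shapes.items()}
print({name: np.shape(w) for name, w in weights.items()})
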
Tree/tree_base.py
pyecharts/pyecharts_gallery
759
132740
from pyecharts import options as opts from pyecharts.charts import Tree data = [ { "children": [ {"name": "B"}, { "children": [{"children": [{"name": "I"}], "name": "E"}, {"name": "F"}], "name": "C", }, { "children": [ {"children": [{"name": "J"}, {"name": "K"}], "name": "G"}, {"name": "H"}, ], "name": "D", }, ], "name": "A", } ] c = ( Tree() .add("", data) .set_global_opts(title_opts=opts.TitleOpts(title="Tree-基本示例")) .render("tree_base.html") )
test/base/test_env.py
Aceticia/tianshou
4,714
132751
import sys import time import numpy as np from gym.spaces.discrete import Discrete from tianshou.data import Batch from tianshou.env import DummyVectorEnv, RayVectorEnv, ShmemVectorEnv, SubprocVectorEnv if __name__ == '__main__': from env import MyTestEnv, NXEnv else: # pytest from test.base.env import MyTestEnv, NXEnv def has_ray(): try: import ray # noqa: F401 return True except ImportError: return False def recurse_comp(a, b): try: if isinstance(a, np.ndarray): if a.dtype == object: return np.array([recurse_comp(m, n) for m, n in zip(a, b)]).all() else: return np.allclose(a, b) elif isinstance(a, (list, tuple)): return np.array([recurse_comp(m, n) for m, n in zip(a, b)]).all() elif isinstance(a, dict): return np.array([recurse_comp(a[k], b[k]) for k in a.keys()]).all() except (Exception): return False def test_async_env(size=10000, num=8, sleep=0.1): # simplify the test case, just keep stepping env_fns = [ lambda i=i: MyTestEnv(size=i, sleep=sleep, random_sleep=True) for i in range(size, size + num) ] test_cls = [SubprocVectorEnv, ShmemVectorEnv] if has_ray(): test_cls += [RayVectorEnv] for cls in test_cls: v = cls(env_fns, wait_num=num // 2, timeout=1e-3) v.seed(None) v.reset() # for a random variable u ~ U[0, 1], let v = max{u1, u2, ..., un} # P(v <= x) = x^n (0 <= x <= 1), pdf of v is nx^{n-1} # expectation of v is n / (n + 1) # for a synchronous environment, the following actions should take # about 7 * sleep * num / (num + 1) seconds # for async simulation, the analysis is complicated, but the time cost # should be smaller action_list = [1] * num + [0] * (num * 2) + [1] * (num * 4) current_idx_start = 0 action = action_list[:num] env_ids = list(range(num)) o = [] spent_time = time.time() while current_idx_start < len(action_list): A, B, C, D = v.step(action=action, id=env_ids) b = Batch({'obs': A, 'rew': B, 'done': C, 'info': D}) env_ids = b.info.env_id o.append(b) current_idx_start += len(action) # len of action may be smaller than len(A) in the end action = action_list[current_idx_start:current_idx_start + len(A)] # truncate env_ids with the first terms # typically len(env_ids) == len(A) == len(action), except for the # last batch when actions are not enough env_ids = env_ids[:len(action)] spent_time = time.time() - spent_time Batch.cat(o) v.close() # assure 1/7 improvement if sys.platform != "darwin": # macOS cannot pass this check assert spent_time < 6.0 * sleep * num / (num + 1) def test_async_check_id(size=100, num=4, sleep=.2, timeout=.7): env_fns = [ lambda: MyTestEnv(size=size, sleep=sleep * 2), lambda: MyTestEnv(size=size, sleep=sleep * 3), lambda: MyTestEnv(size=size, sleep=sleep * 5), lambda: MyTestEnv(size=size, sleep=sleep * 7) ] test_cls = [SubprocVectorEnv, ShmemVectorEnv] if has_ray(): test_cls += [RayVectorEnv] total_pass = 0 for cls in test_cls: pass_check = 1 v = cls(env_fns, wait_num=num - 1, timeout=timeout) v.reset() expect_result = [ [0, 1], [0, 1, 2], [0, 1, 3], [0, 1, 2], [0, 1], [0, 2, 3], [0, 1], ] ids = np.arange(num) for res in expect_result: t = time.time() _, _, _, info = v.step([1] * len(ids), ids) t = time.time() - t ids = Batch(info).env_id print(ids, t) if not ( len(ids) == len(res) and np.allclose(sorted(ids), res) and (t < timeout) == (len(res) == num - 1) ): pass_check = 0 break total_pass += pass_check if sys.platform == "linux": # Windows/macOS may not pass this check assert total_pass >= 2 def test_vecenv(size=10, num=8, sleep=0.001): env_fns = [ lambda i=i: MyTestEnv(size=i, sleep=sleep, recurse_state=True) for i in range(size, size + num) 
] venv = [ DummyVectorEnv(env_fns), SubprocVectorEnv(env_fns), ShmemVectorEnv(env_fns), ] if has_ray(): venv += [RayVectorEnv(env_fns)] for v in venv: v.seed(0) action_list = [1] * 5 + [0] * 10 + [1] * 20 o = [v.reset() for v in venv] for a in action_list: o = [] for v in venv: A, B, C, D = v.step([a] * num) if sum(C): A = v.reset(np.where(C)[0]) o.append([A, B, C, D]) for index, infos in enumerate(zip(*o)): if index == 3: # do not check info here continue for info in infos: assert recurse_comp(infos[0], info) if __name__ == '__main__': t = [0] * len(venv) for i, e in enumerate(venv): t[i] = time.time() e.reset() for a in action_list: done = e.step([a] * num)[2] if sum(done) > 0: e.reset(np.where(done)[0]) t[i] = time.time() - t[i] for i, v in enumerate(venv): print(f'{type(v)}: {t[i]:.6f}s') for v in venv: assert v.size == list(range(size, size + num)) assert v.env_num == num assert v.action_space == [Discrete(2)] * num for v in venv: v.close() def test_env_obs(): for obs_type in ["array", "object"]: envs = SubprocVectorEnv( [lambda i=x: NXEnv(i, obs_type) for x in [5, 10, 15, 20]] ) obs = envs.reset() assert obs.dtype == object obs = envs.step([1, 1, 1, 1])[0] assert obs.dtype == object if __name__ == '__main__': test_env_obs() test_vecenv() test_async_env() test_async_check_id()
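
A hedged minimal sketch of the vectorized-env API these tests drive, using a standard Gym task instead of the test envs (assumes gym is installed; the per-env reset mirrors the pattern in test_vecenv above):

import gym
import numpy as np
from tianshou.env import DummyVectorEnv

envs = DummyVectorEnv([lambda: gym.make("CartPole-v0") for _ in range(4)])
obs = envs.reset()  # stacked observations, one row per env
obs, rew, done, info = envs.step(np.zeros(4, dtype=int))
if done.any():
    envs.reset(np.where(done)[0])  # reset only the finished envs
envs.close()
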
alibi/confidence/tests/test_model_linearity.py
markus583/alibi
1,570
132756
import pytest import numpy as np from sklearn.datasets import load_iris, load_boston from sklearn.linear_model import LogisticRegression, LinearRegression from sklearn.svm import SVR from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure from alibi.confidence.model_linearity import _linear_superposition, _sample_grid, _sample_knn from functools import reduce @pytest.mark.parametrize('input_shape', ((3,), (4, 4, 1))) @pytest.mark.parametrize('nb_instances', (1, 10)) def test_linear_superposition(input_shape, nb_instances): alphas = np.array([0.5, 0.5]) vecs_list = [] for i in range(nb_instances): v0 = np.zeros((1,) + input_shape) v1 = np.ones((1,) + input_shape) vec = np.stack((v0, v1), axis=1) vecs_list.append(vec) vecs = reduce(lambda x, y: np.vstack((x, y)), vecs_list) summ = _linear_superposition(alphas, vecs, input_shape) assert summ.shape[0] == nb_instances assert summ.shape[1:] == input_shape assert (summ == 0.5).all() @pytest.mark.parametrize('nb_instances', (1, 5)) @pytest.mark.parametrize('nb_samples', (2, 10)) def test_sample_knn(nb_instances, nb_samples): iris = load_iris() X_train = iris.data input_shape = X_train.shape[1:] x = np.ones((nb_instances, ) + input_shape) X_samples = _sample_knn(x=x, X_train=X_train, nb_samples=nb_samples) assert X_samples.shape[0] == nb_instances assert X_samples.shape[1] == nb_samples @pytest.mark.parametrize('nb_instances', (5, )) @pytest.mark.parametrize('nb_samples', (3, )) @pytest.mark.parametrize('input_shape', ((3,), (4, 4, 1))) def test_sample_grid(nb_instances, nb_samples, input_shape): x = np.ones((nb_instances, ) + input_shape) nb_features = x.reshape(x.shape[0], -1).shape[1] feature_range = np.array([[0, 1] for _ in range(nb_features)]) X_samples = _sample_grid(x, feature_range, nb_samples=nb_samples) assert X_samples.shape[0] == nb_instances assert X_samples.shape[1] == nb_samples @pytest.mark.parametrize('method', ('knn', 'grid')) @pytest.mark.parametrize('epsilon', (0.04,)) @pytest.mark.parametrize('res', (100,)) @pytest.mark.parametrize('nb_instances', (1, 10)) @pytest.mark.parametrize('agg', ('global', 'pairwise')) def test_linearity_measure_class(method, epsilon, res, nb_instances, agg): iris = load_iris() X_train = iris.data y_train = iris.target x = X_train[0: nb_instances].reshape(nb_instances, -1) lg = LogisticRegression() lg.fit(X_train, y_train) def predict_fn(x): return lg.predict_proba(x) lin = linearity_measure(predict_fn, x, method=method, epsilon=epsilon, X_train=X_train, res=res, model_type='classifier', agg=agg) assert lin.shape[0] == nb_instances, 'Checking shapes' assert (lin >= 0).all(), 'Linearity measure must be >= 0' feature_range = [[0, 1] for _ in range(X_train.shape[1])] lin_2 = linearity_measure(predict_fn, x, method='grid', epsilon=epsilon, feature_range=feature_range, res=res, model_type='classifier', agg=agg) assert lin_2.shape[0] == nb_instances, 'Nb of linearity values returned different from number of instances' assert (lin_2 >= 0).all(), 'Linearity measure must be >= 0' @pytest.mark.parametrize('method', ('knn', 'grid')) @pytest.mark.parametrize('epsilon', (0.04,)) @pytest.mark.parametrize('res', (100,)) @pytest.mark.parametrize('nb_instances', (1, 10)) @pytest.mark.parametrize('agg', ('global', 'pairwise')) def test_linearity_measure_reg(method, epsilon, res, nb_instances, agg): boston = load_boston() X_train, y_train = boston.data, boston.target x = X_train[0: nb_instances].reshape(nb_instances, -1) lg = LinearRegression() lg.fit(X_train, y_train) svr = SVR(kernel='linear') 
svr.fit(X_train, y_train) def predict_fn_svr(x): return svr.predict(x) def predict_fn(x): return lg.predict(x) lin = linearity_measure(predict_fn, x, method=method, epsilon=epsilon, X_train=X_train, res=res, model_type='regressor', agg=agg) assert lin.shape[0] == nb_instances, 'Checking shapes' assert (lin >= 0).all(), 'Linearity measure must be >= 0' assert np.allclose(lin, np.zeros(lin.shape)) lin_svr = linearity_measure(predict_fn_svr, x, method=method, epsilon=epsilon, X_train=X_train, res=res, model_type='regressor', agg=agg) assert lin_svr.shape[0] == nb_instances, 'Checking shapes' assert (lin_svr >= 0).all(), 'Linearity measure must be >= 0' feature_range = [[0, 1] for _ in range(X_train.shape[1])] lin_2 = linearity_measure(predict_fn, x, method='grid', epsilon=epsilon, feature_range=feature_range, res=res, model_type='regressor', agg=agg) assert lin_2.shape[0] == nb_instances, 'Checking shapes' assert (lin_2 >= 0).all(), 'Linearity measure must be >= 0' assert np.allclose(lin_2, np.zeros(lin_2.shape)) feature_range = [[0, 1] for _ in range(X_train.shape[1])] lin_2_svr = linearity_measure(predict_fn_svr, x, method='grid', epsilon=epsilon, feature_range=feature_range, res=res, model_type='regressor', agg=agg) assert lin_2_svr.shape[0] == nb_instances, 'Checking shapes' assert (lin_2_svr >= 0).all(), 'Linearity measure must be >= 0' y_train_multi = np.stack((y_train, y_train), axis=1) lg_multi = LinearRegression() lg_multi.fit(X_train, y_train_multi) def predict_fn_multi(x): return lg_multi.predict(x) lm_multi = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg) lm_multi.fit(X_train) lin_multi = lm_multi.score(predict_fn_multi, x) assert lin_multi.shape[0] == nb_instances, 'Checking shapes' assert (lin_multi >= 0).all(), 'Linearity measure must be >= 0' assert np.allclose(lin_multi, np.zeros(lin_multi.shape)) @pytest.mark.parametrize('method', ('knn', 'grid')) @pytest.mark.parametrize('epsilon', (0.04,)) @pytest.mark.parametrize('res', (100,)) @pytest.mark.parametrize('nb_instances', (1, 10)) @pytest.mark.parametrize('agg', ('global', 'pairwise')) def test_LinearityMeasure_class(method, epsilon, res, nb_instances, agg): iris = load_iris() X_train = iris.data y_train = iris.target x = X_train[0: nb_instances].reshape(nb_instances, -1) lg = LogisticRegression() lg.fit(X_train, y_train) def predict_fn(x): return lg.predict_proba(x) lm = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='classifier', agg=agg) lm.fit(X_train) lin = lm.score(predict_fn, x) assert lin.shape[0] == nb_instances, 'Checking shapes' assert (lin >= 0).all(), 'Linearity measure must be >= 0' @pytest.mark.parametrize('method', ('knn', 'grid')) @pytest.mark.parametrize('epsilon', (0.04,)) @pytest.mark.parametrize('res', (100,)) @pytest.mark.parametrize('nb_instances', (1, 10)) @pytest.mark.parametrize('agg', ('global', 'pairwise')) def test_LinearityMeasure_reg(method, epsilon, res, nb_instances, agg): boston = load_boston() X_train, y_train = boston.data, boston.target x = X_train[0: nb_instances].reshape(nb_instances, -1) lg = LinearRegression() lg.fit(X_train, y_train) def predict_fn(x): return lg.predict(x) y_train_multi = np.stack((y_train, y_train), axis=1) lg_multi = LinearRegression() lg_multi.fit(X_train, y_train_multi) def predict_fn_multi(x): return lg_multi.predict(x) lm = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg) lm.fit(X_train) lin = lm.score(predict_fn, x) assert lin.shape[0] == nb_instances, 
'Checking shapes' assert (lin >= 0).all(), 'Linearity measure must be >= 0' assert np.allclose(lin, np.zeros(lin.shape)) lm_multi = LinearityMeasure(method=method, epsilon=epsilon, res=res, model_type='regressor', agg=agg) lm_multi.fit(X_train) lin_multi = lm_multi.score(predict_fn_multi, x) assert lin_multi.shape[0] == nb_instances, 'Checking shapes' assert (lin_multi >= 0).all(), 'Linearity measure must be >= 0' assert np.allclose(lin_multi, np.zeros(lin_multi.shape))
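
A hedged end-to-end sketch of the API under test above, on iris with a logistic regression; it mirrors the fixture calls already shown and adds nothing beyond them:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

from alibi.confidence.model_linearity import LinearityMeasure

X, y = load_iris(return_X_y=True)
clf = LogisticRegression().fit(X, y)

lm = LinearityMeasure(method='knn', epsilon=0.04, res=100,
                      model_type='classifier', agg='pairwise')
lm.fit(X)
scores = lm.score(lambda x: clf.predict_proba(x), X[:5])
print(scores.shape)  # (5,)
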
src/networkx/readwrite/json_graph/tree.py
MarletteFunding/aws-kube-codesuite
184
132760
# Copyright (C) 2011 by # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # All rights reserved. # BSD license. from itertools import chain, count import networkx as nx from networkx.utils import make_str __author__ = """<NAME> (<EMAIL>))""" __all__ = ['tree_data', 'tree_graph'] _attrs = dict(id='id', children='children') def tree_data(G, root, attrs=_attrs): """Return data in tree format that is suitable for JSON serialization and use in Javascript documents. Parameters ---------- G : NetworkX graph G must be an oriented tree root : node The root of the tree attrs : dict A dictionary that contains two keys 'id' and 'children'. The corresponding values provide the attribute names for storing NetworkX-internal graph data. The values should be unique. Default value: :samp:`dict(id='id', children='children')`. If some user-defined graph data use these attribute names as data keys, they may be silently dropped. Returns ------- data : dict A dictionary with node-link formatted data. Raises ------ NetworkXError If values in attrs are not unique. Examples -------- >>> from networkx.readwrite import json_graph >>> G = nx.DiGraph([(1,2)]) >>> data = json_graph.tree_data(G,root=1) To serialize with json >>> import json >>> s = json.dumps(data) Notes ----- Node attributes are stored in this format but keys for attributes must be strings if you want to serialize with JSON. Graph and edge attributes are not stored. The default value of attrs will be changed in a future release of NetworkX. See Also -------- tree_graph, node_link_data, node_link_data """ if G.number_of_nodes() != G.number_of_edges() + 1: raise TypeError("G is not a tree.") if not G.is_directed(): raise TypeError("G is not directed.") id_ = attrs['id'] children = attrs['children'] if id_ == children: raise nx.NetworkXError('Attribute names are not unique.') def add_children(n, G): nbrs = G[n] if len(nbrs) == 0: return [] children_ = [] for child in nbrs: d = dict(chain(G.nodes[child].items(), [(id_, child)])) c = add_children(child, G) if c: d[children] = c children_.append(d) return children_ data = dict(chain(G.nodes[root].items(), [(id_, root)])) data[children] = add_children(root, G) return data def tree_graph(data, attrs=_attrs): """Return graph from tree data format. Parameters ---------- data : dict Tree formatted graph data Returns ------- G : NetworkX DiGraph attrs : dict A dictionary that contains two keys 'id' and 'children'. The corresponding values provide the attribute names for storing NetworkX-internal graph data. The values should be unique. Default value: :samp:`dict(id='id', children='children')`. Examples -------- >>> from networkx.readwrite import json_graph >>> G = nx.DiGraph([(1,2)]) >>> data = json_graph.tree_data(G,root=1) >>> H = json_graph.tree_graph(data) Notes ----- The default value of attrs will be changed in a future release of NetworkX. See Also -------- tree_graph, node_link_data, adjacency_data """ graph = nx.DiGraph() id_ = attrs['id'] children = attrs['children'] def add_children(parent, children_): for data in children_: child = data[id_] graph.add_edge(parent, child) grandchildren = data.get(children, []) if grandchildren: add_children(child, grandchildren) nodedata = dict((make_str(k), v) for k, v in data.items() if k != id_ and k != children) graph.add_node(child, **nodedata) root = data[id_] children_ = data.get(children, []) nodedata = dict((make_str(k), v) for k, v in data.items() if k != id_ and k != children) graph.add_node(root, **nodedata) add_children(root, children_) return graph
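
A hedged round-trip sketch for the record above, showing that node attributes survive tree_data/tree_graph while graph and edge attributes do not (as the tree_data docstring notes):

import networkx as nx
from networkx.readwrite import json_graph

G = nx.DiGraph()
G.add_node(1, color="red")
G.add_edge(1, 2)
G.add_edge(1, 3)

data = json_graph.tree_data(G, root=1)
H = json_graph.tree_graph(data)
print(sorted(H.edges()))    # [(1, 2), (1, 3)]
print(H.nodes[1]["color"])  # 'red' -- node attributes round-trip
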
chrome/tools/build/mac/run_verify_order.py
google-ar/chromium
2,151
132768
<filename>chrome/tools/build/mac/run_verify_order.py # Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import argparse import os.path import sys import subprocess # Wraps chrome/tools/build/mac/verify_order for the GN build so that it can # write a stamp file, rather than operate as a postbuild. if __name__ == '__main__': parser = argparse.ArgumentParser( description='A wrapper around verify_order that writes a stamp file.') parser.add_argument('--stamp', action='store', type=str, help='Touch this stamp file on success.') parser.add_argument('--developer_dir', required=False, help='Path to Xcode.') args, unknown_args = parser.parse_known_args() if args.developer_dir: os.environ['DEVELOPER_DIR'] = args.developer_dir this_script_dir = os.path.dirname(sys.argv[0]) rv = subprocess.check_call( [ os.path.join(this_script_dir, 'verify_order') ] + unknown_args) if rv == 0 and args.stamp: if os.path.exists(args.stamp): os.unlink(args.stamp) open(args.stamp, 'w+').close() sys.exit(rv)
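# A hypothetical invocation sketch for the wrapper above; the stamp path,
# Xcode path, and trailing Mach-O binary path are invented for illustration.
# Any argument the wrapper's parser does not recognize is forwarded verbatim
# to the verify_order script that sits next to it.
import subprocess

subprocess.check_call([
    'python', 'chrome/tools/build/mac/run_verify_order.py',
    '--stamp', 'obj/chrome/verify_order.stamp',
    '--developer_dir', '/Applications/Xcode.app/Contents/Developer',
    'Chromium.app/Contents/MacOS/Chromium',  # forwarded to verify_order
])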
Tools/mavlink_px4.py
wms124/PX4_1.4.1_Back-up
4,224
132791
'''
MAVLink protocol implementation (auto-generated by mavgen.py)

Generated from: common.xml

Note: this file has been auto-generated. DO NOT EDIT
'''

import struct, array, mavutil, time, json

WIRE_PROTOCOL_VERSION = "1.0"


# some base types from mavlink_types.h
MAVLINK_TYPE_CHAR = 0
MAVLINK_TYPE_UINT8_T = 1
MAVLINK_TYPE_INT8_T = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T = 8
MAVLINK_TYPE_FLOAT = 9
MAVLINK_TYPE_DOUBLE = 10


class MAVLink_header(object):
    '''MAVLink message header'''
    def __init__(self, msgId, mlen=0, seq=0, srcSystem=0, srcComponent=0):
        self.mlen = mlen
        self.seq = seq
        self.srcSystem = srcSystem
        self.srcComponent = srcComponent
        self.msgId = msgId

    def pack(self):
        return struct.pack('BBBBBB', 254, self.mlen, self.seq,
                           self.srcSystem, self.srcComponent, self.msgId)

class MAVLink_message(object):
    '''base MAVLink message class'''
    def __init__(self, msgId, name):
        self._header = MAVLink_header(msgId)
        self._payload = None
        self._msgbuf = None
        self._crc = None
        self._fieldnames = []
        self._type = name

    def get_msgbuf(self):
        if isinstance(self._msgbuf, str):
            return self._msgbuf
        return self._msgbuf.tostring()

    def get_header(self):
        return self._header

    def get_payload(self):
        return self._payload

    def get_crc(self):
        return self._crc

    def get_fieldnames(self):
        return self._fieldnames

    def get_type(self):
        return self._type

    def get_msgId(self):
        return self._header.msgId

    def get_srcSystem(self):
        return self._header.srcSystem

    def get_srcComponent(self):
        return self._header.srcComponent

    def get_seq(self):
        return self._header.seq

    def __str__(self):
        ret = '%s {' % self._type
        for a in self._fieldnames:
            v = getattr(self, a)
            ret += '%s : %s, ' % (a, v)
        ret = ret[0:-2] + '}'
        return ret

    def to_dict(self):
        d = dict({})
        d['mavpackettype'] = self._type
        for a in self._fieldnames:
            d[a] = getattr(self, a)
        return d

    def to_json(self):
        return json.dumps(self.to_dict())

    def pack(self, mav, crc_extra, payload):
        self._payload = payload
        self._header = MAVLink_header(self._header.msgId, len(payload), mav.seq,
                                      mav.srcSystem, mav.srcComponent)
        self._msgbuf = self._header.pack() + payload
        crc = mavutil.x25crc(self._msgbuf[1:])
        if True: # using CRC extra
            crc.accumulate(chr(crc_extra))
        self._crc = crc.crc
        self._msgbuf += struct.pack('<H', self._crc)
        return self._msgbuf

# enums

# MAV_AUTOPILOT
MAV_AUTOPILOT_GENERIC = 0 # Generic autopilot, full support for everything
MAV_AUTOPILOT_PIXHAWK = 1 # PIXHAWK autopilot, http://pixhawk.ethz.ch
MAV_AUTOPILOT_SLUGS = 2 # SLUGS autopilot, http://slugsuav.soe.ucsc.edu
MAV_AUTOPILOT_ARDUPILOTMEGA = 3 # ArduPilotMega / ArduCopter, http://diydrones.com
MAV_AUTOPILOT_OPENPILOT = 4 # OpenPilot, http://openpilot.org
MAV_AUTOPILOT_GENERIC_WAYPOINTS_ONLY = 5 # Generic autopilot only supporting simple waypoints
MAV_AUTOPILOT_GENERIC_WAYPOINTS_AND_SIMPLE_NAVIGATION_ONLY = 6 # Generic autopilot supporting waypoints and other simple navigation
                        # commands
MAV_AUTOPILOT_GENERIC_MISSION_FULL = 7 # Generic autopilot supporting the full mission command set
MAV_AUTOPILOT_INVALID = 8 # No valid autopilot, e.g. a GCS or other MAVLink component
MAV_AUTOPILOT_PPZ = 9 # PPZ UAV - http://nongnu.org/paparazzi
MAV_AUTOPILOT_UDB = 10 # UAV Dev Board
MAV_AUTOPILOT_FP = 11 # FlexiPilot
MAV_AUTOPILOT_PX4 = 12 # PX4 Autopilot - http://pixhawk.ethz.ch/px4/
MAV_AUTOPILOT_ENUM_END = 13 #

# MAV_TYPE
MAV_TYPE_GENERIC = 0 # Generic micro air vehicle.
MAV_TYPE_FIXED_WING = 1 # Fixed wing aircraft.
MAV_TYPE_QUADROTOR = 2 # Quadrotor
MAV_TYPE_COAXIAL = 3 # Coaxial helicopter
MAV_TYPE_HELICOPTER = 4 # Normal helicopter with tail rotor.
MAV_TYPE_ANTENNA_TRACKER = 5 # Ground installation
MAV_TYPE_GCS = 6 # Operator control unit / ground control station
MAV_TYPE_AIRSHIP = 7 # Airship, controlled
MAV_TYPE_FREE_BALLOON = 8 # Free balloon, uncontrolled
MAV_TYPE_ROCKET = 9 # Rocket
MAV_TYPE_GROUND_ROVER = 10 # Ground rover
MAV_TYPE_SURFACE_BOAT = 11 # Surface vessel, boat, ship
MAV_TYPE_SUBMARINE = 12 # Submarine
MAV_TYPE_HEXAROTOR = 13 # Hexarotor
MAV_TYPE_OCTOROTOR = 14 # Octorotor
MAV_TYPE_TRICOPTER = 15 # Tricopter
MAV_TYPE_FLAPPING_WING = 16 # Flapping wing
MAV_TYPE_KITE = 17 # Kite
MAV_TYPE_ENUM_END = 18 #

# MAV_MODE_FLAG
MAV_MODE_FLAG_CUSTOM_MODE_ENABLED = 1 # 0b00000001 Reserved for future use.
MAV_MODE_FLAG_TEST_ENABLED = 2 # 0b00000010 system has a test mode enabled. This flag is intended for
                        # temporary system tests and should not be
                        # used for stable implementations.
MAV_MODE_FLAG_AUTO_ENABLED = 4 # 0b00000100 autonomous mode enabled, system finds its own goal
                        # positions. Guided flag can be set or not,
                        # depends on the actual implementation.
MAV_MODE_FLAG_GUIDED_ENABLED = 8 # 0b00001000 guided mode enabled, system flies MISSIONs / mission items.
MAV_MODE_FLAG_STABILIZE_ENABLED = 16 # 0b00010000 system electronically stabilizes its attitude (and
                        # optionally position). However, it needs
                        # further control inputs to move around.
MAV_MODE_FLAG_HIL_ENABLED = 32 # 0b00100000 hardware in the loop simulation. All motors / actuators are
                        # blocked, but internal software is fully
                        # operational.
MAV_MODE_FLAG_MANUAL_INPUT_ENABLED = 64 # 0b01000000 remote control input is enabled.
MAV_MODE_FLAG_SAFETY_ARMED = 128 # 0b10000000 MAV safety set to armed. Motors are enabled / running / can
                        # start. Ready to fly.
MAV_MODE_FLAG_ENUM_END = 129 #

# MAV_MODE_FLAG_DECODE_POSITION
MAV_MODE_FLAG_DECODE_POSITION_CUSTOM_MODE = 1 # Eighth bit: 00000001
MAV_MODE_FLAG_DECODE_POSITION_TEST = 2 # Seventh bit: 00000010
MAV_MODE_FLAG_DECODE_POSITION_AUTO = 4 # Sixth bit: 00000100
MAV_MODE_FLAG_DECODE_POSITION_GUIDED = 8 # Fifth bit: 00001000
MAV_MODE_FLAG_DECODE_POSITION_STABILIZE = 16 # Fourth bit: 00010000
MAV_MODE_FLAG_DECODE_POSITION_HIL = 32 # Third bit: 00100000
MAV_MODE_FLAG_DECODE_POSITION_MANUAL = 64 # Second bit: 01000000
MAV_MODE_FLAG_DECODE_POSITION_SAFETY = 128 # First bit: 10000000
MAV_MODE_FLAG_DECODE_POSITION_ENUM_END = 129 #

# MAV_GOTO
MAV_GOTO_DO_HOLD = 0 # Hold at the current position.
MAV_GOTO_DO_CONTINUE = 1 # Continue with the next item in mission execution.
MAV_GOTO_HOLD_AT_CURRENT_POSITION = 2 # Hold at the current position of the system
MAV_GOTO_HOLD_AT_SPECIFIED_POSITION = 3 # Hold at the position specified in the parameters of the DO_HOLD action
MAV_GOTO_ENUM_END = 4 #

# MAV_MODE
MAV_MODE_PREFLIGHT = 0 # System is not ready to fly, booting, calibrating, etc. No flag is set.
MAV_MODE_MANUAL_DISARMED = 64 # System is allowed to be active, under manual (RC) control, no
                        # stabilization
MAV_MODE_TEST_DISARMED = 66 # UNDEFINED mode. This solely depends on the autopilot - use with
                        # caution, intended for developers only.
MAV_MODE_STABILIZE_DISARMED = 80 # System is allowed to be active, under assisted RC control.
MAV_MODE_GUIDED_DISARMED = 88 # System is allowed to be active, under autonomous control, manual
                        # setpoint
MAV_MODE_AUTO_DISARMED = 92 # System is allowed to be active, under autonomous control and
                        # navigation (the trajectory is decided
                        # onboard and not pre-programmed by MISSIONs)
MAV_MODE_MANUAL_ARMED = 192 # System is allowed to be active, under manual (RC) control, no
                        # stabilization
MAV_MODE_TEST_ARMED = 194 # UNDEFINED mode. This solely depends on the autopilot - use with
                        # caution, intended for developers only.
MAV_MODE_STABILIZE_ARMED = 208 # System is allowed to be active, under assisted RC control.
MAV_MODE_GUIDED_ARMED = 216 # System is allowed to be active, under autonomous control, manual
                        # setpoint
MAV_MODE_AUTO_ARMED = 220 # System is allowed to be active, under autonomous control and
                        # navigation (the trajectory is decided
                        # onboard and not pre-programmed by MISSIONs)
MAV_MODE_ENUM_END = 221 #

# MAV_STATE
MAV_STATE_UNINIT = 0 # Uninitialized system, state is unknown.
MAV_STATE_BOOT = 1 # System is booting up.
MAV_STATE_CALIBRATING = 2 # System is calibrating and not flight-ready.
MAV_STATE_STANDBY = 3 # System is grounded and on standby. It can be launched any time.
MAV_STATE_ACTIVE = 4 # System is active and might be already airborne. Motors are engaged.
MAV_STATE_CRITICAL = 5 # System is in a non-normal flight mode. It can however still navigate.
MAV_STATE_EMERGENCY = 6 # System is in a non-normal flight mode. It lost control over parts or
                        # over the whole airframe. It is in mayday and
                        # going down.
MAV_STATE_POWEROFF = 7 # System just initialized its power-down sequence, will shut down now.
MAV_STATE_ENUM_END = 8 #

# MAV_COMPONENT
MAV_COMP_ID_ALL = 0 #
MAV_COMP_ID_CAMERA = 100 #
MAV_COMP_ID_SERVO1 = 140 #
MAV_COMP_ID_SERVO2 = 141 #
MAV_COMP_ID_SERVO3 = 142 #
MAV_COMP_ID_SERVO4 = 143 #
MAV_COMP_ID_SERVO5 = 144 #
MAV_COMP_ID_SERVO6 = 145 #
MAV_COMP_ID_SERVO7 = 146 #
MAV_COMP_ID_SERVO8 = 147 #
MAV_COMP_ID_SERVO9 = 148 #
MAV_COMP_ID_SERVO10 = 149 #
MAV_COMP_ID_SERVO11 = 150 #
MAV_COMP_ID_SERVO12 = 151 #
MAV_COMP_ID_SERVO13 = 152 #
MAV_COMP_ID_SERVO14 = 153 #
MAV_COMP_ID_MAPPER = 180 #
MAV_COMP_ID_MISSIONPLANNER = 190 #
MAV_COMP_ID_PATHPLANNER = 195 #
MAV_COMP_ID_IMU = 200 #
MAV_COMP_ID_IMU_2 = 201 #
MAV_COMP_ID_IMU_3 = 202 #
MAV_COMP_ID_GPS = 220 #
MAV_COMP_ID_UDP_BRIDGE = 240 #
MAV_COMP_ID_UART_BRIDGE = 241 #
MAV_COMP_ID_SYSTEM_CONTROL = 250 #
MAV_COMPONENT_ENUM_END = 251 #

# MAV_FRAME
MAV_FRAME_GLOBAL = 0 # Global coordinate frame, WGS84 coordinate system. First value / x:
                        # latitude, second value / y: longitude, third
                        # value / z: positive altitude over mean sea
                        # level (MSL)
MAV_FRAME_LOCAL_NED = 1 # Local coordinate frame, Z-down (x: north, y: east, z: down).
MAV_FRAME_MISSION = 2 # NOT a coordinate frame, indicates a mission command.
MAV_FRAME_GLOBAL_RELATIVE_ALT = 3 # Global coordinate frame, WGS84 coordinate system, relative altitude
                        # over ground with respect to the home
                        # position. First value / x: latitude, second
                        # value / y: longitude, third value / z:
                        # positive altitude with 0 being at the
                        # altitude of the home location.
MAV_FRAME_LOCAL_ENU = 4 # Local coordinate frame, Z-up (x: east, y: north, z: up)
MAV_FRAME_ENUM_END = 5 #

# MAVLINK_DATA_STREAM_TYPE
MAVLINK_DATA_STREAM_IMG_JPEG = 1 #
MAVLINK_DATA_STREAM_IMG_BMP = 2 #
MAVLINK_DATA_STREAM_IMG_RAW8U = 3 #
MAVLINK_DATA_STREAM_IMG_RAW32U = 4 #
MAVLINK_DATA_STREAM_IMG_PGM = 5 #
MAVLINK_DATA_STREAM_IMG_PNG = 6 #
MAVLINK_DATA_STREAM_TYPE_ENUM_END = 7 #

# MAV_CMD
MAV_CMD_NAV_WAYPOINT = 16 # Navigate to MISSION.
MAV_CMD_NAV_LOITER_UNLIM = 17 # Loiter around this MISSION an unlimited amount of time
MAV_CMD_NAV_LOITER_TURNS = 18 # Loiter around this MISSION for X turns
MAV_CMD_NAV_LOITER_TIME = 19 # Loiter around this MISSION for X seconds
MAV_CMD_NAV_RETURN_TO_LAUNCH = 20 # Return to launch location
MAV_CMD_NAV_LAND = 21 # Land at location
MAV_CMD_NAV_TAKEOFF = 22 # Takeoff from ground / hand
MAV_CMD_NAV_ROI = 80 # Sets the region of interest (ROI) for a sensor set or the vehicle
                        # itself. This can then be used by the
                        # vehicle's control system to control the
                        # vehicle attitude and the attitude of various
                        # sensors such as cameras.
MAV_CMD_NAV_PATHPLANNING = 81 # Control autonomous path planning on the MAV.
MAV_CMD_NAV_LAST = 95 # NOP - This command is only used to mark the upper limit of the
                        # NAV/ACTION commands in the enumeration
MAV_CMD_CONDITION_DELAY = 112 # Delay mission state machine.
MAV_CMD_CONDITION_CHANGE_ALT = 113 # Ascend/descend at rate. Delay mission state machine until desired
                        # altitude reached.
MAV_CMD_CONDITION_DISTANCE = 114 # Delay mission state machine until within desired distance of next NAV
                        # point.
MAV_CMD_CONDITION_YAW = 115 # Reach a certain target angle.
MAV_CMD_CONDITION_LAST = 159 # NOP - This command is only used to mark the upper limit of the
                        # CONDITION commands in the enumeration
MAV_CMD_DO_SET_MODE = 176 # Set system mode.
MAV_CMD_DO_JUMP = 177 # Jump to the desired command in the mission list. Repeat this action
                        # only the specified number of times
MAV_CMD_DO_CHANGE_SPEED = 178 # Change speed and/or throttle set points.
MAV_CMD_DO_SET_HOME = 179 # Changes the home location either to the current location or a
                        # specified location.
MAV_CMD_DO_SET_PARAMETER = 180 # Set a system parameter. Caution! Use of this command requires
                        # knowledge of the numeric enumeration value
                        # of the parameter.
MAV_CMD_DO_SET_RELAY = 181 # Set a relay to a condition.
MAV_CMD_DO_REPEAT_RELAY = 182 # Cycle a relay on and off for a desired number of cycles with a desired
                        # period.
MAV_CMD_DO_SET_SERVO = 183 # Set a servo to a desired PWM value.
MAV_CMD_DO_REPEAT_SERVO = 184 # Cycle a servo between its nominal setting and a desired PWM for a
                        # desired number of cycles with a desired period.
MAV_CMD_DO_CONTROL_VIDEO = 200 # Control onboard camera system.
MAV_CMD_DO_LAST = 240 # NOP - This command is only used to mark the upper limit of the DO
                        # commands in the enumeration
MAV_CMD_PREFLIGHT_CALIBRATION = 241 # Trigger calibration. This command will be only accepted if in pre-
                        # flight mode.
MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS = 242 # Set sensor offsets. This command will be only accepted if in pre-
                        # flight mode.
MAV_CMD_PREFLIGHT_STORAGE = 245 # Request storage of different parameter values and logs. This command
                        # will be only accepted if in pre-flight mode.
MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN = 246 # Request the reboot or shutdown of system components.
MAV_CMD_OVERRIDE_GOTO = 252 # Hold / continue the current action
MAV_CMD_MISSION_START = 300 # start running a mission
MAV_CMD_COMPONENT_ARM_DISARM = 400 # Arms / Disarms a component
MAV_CMD_ENUM_END = 401 #

# MAV_DATA_STREAM
MAV_DATA_STREAM_ALL = 0 # Enable all data streams
MAV_DATA_STREAM_RAW_SENSORS = 1 # Enable IMU_RAW, GPS_RAW, GPS_STATUS packets.
MAV_DATA_STREAM_EXTENDED_STATUS = 2 # Enable GPS_STATUS, CONTROL_STATUS, AUX_STATUS
MAV_DATA_STREAM_RC_CHANNELS = 3 # Enable RC_CHANNELS_SCALED, RC_CHANNELS_RAW, SERVO_OUTPUT_RAW
MAV_DATA_STREAM_RAW_CONTROLLER = 4 # Enable ATTITUDE_CONTROLLER_OUTPUT, POSITION_CONTROLLER_OUTPUT,
                        # NAV_CONTROLLER_OUTPUT.
MAV_DATA_STREAM_POSITION = 6 # Enable LOCAL_POSITION, GLOBAL_POSITION/GLOBAL_POSITION_INT messages.
MAV_DATA_STREAM_EXTRA1 = 10 # Dependent on the autopilot
MAV_DATA_STREAM_EXTRA2 = 11 # Dependent on the autopilot
MAV_DATA_STREAM_EXTRA3 = 12 # Dependent on the autopilot
MAV_DATA_STREAM_ENUM_END = 13 #

# MAV_ROI
MAV_ROI_NONE = 0 # No region of interest.
MAV_ROI_WPNEXT = 1 # Point toward next MISSION.
MAV_ROI_WPINDEX = 2 # Point toward given MISSION.
MAV_ROI_LOCATION = 3 # Point toward fixed location.
MAV_ROI_TARGET = 4 # Point toward target of given id.
MAV_ROI_ENUM_END = 5 #

# MAV_CMD_ACK
MAV_CMD_ACK_OK = 1 # Command / mission item is ok.
MAV_CMD_ACK_ERR_FAIL = 2 # Generic error message if none of the other reasons fails or if no
                        # detailed error reporting is implemented.
MAV_CMD_ACK_ERR_ACCESS_DENIED = 3 # The system is refusing to accept this command from this source /
                        # communication partner.
MAV_CMD_ACK_ERR_NOT_SUPPORTED = 4 # Command or mission item is not supported, other commands would be
                        # accepted.
MAV_CMD_ACK_ERR_COORDINATE_FRAME_NOT_SUPPORTED = 5 # The coordinate frame of this command / mission item is not supported.
MAV_CMD_ACK_ERR_COORDINATES_OUT_OF_RANGE = 6 # The coordinate frame of this command is ok, but the coordinate values
                        # exceed the safety limits of this system.
                        # This is a generic error, please use the more
                        # specific error messages below if possible.
MAV_CMD_ACK_ERR_X_LAT_OUT_OF_RANGE = 7 # The X or latitude value is out of range.
MAV_CMD_ACK_ERR_Y_LON_OUT_OF_RANGE = 8 # The Y or longitude value is out of range.
MAV_CMD_ACK_ERR_Z_ALT_OUT_OF_RANGE = 9 # The Z or altitude value is out of range.
MAV_CMD_ACK_ENUM_END = 10 #

# MAV_PARAM_TYPE
MAV_PARAM_TYPE_UINT8 = 1 # 8-bit unsigned integer
MAV_PARAM_TYPE_INT8 = 2 # 8-bit signed integer
MAV_PARAM_TYPE_UINT16 = 3 # 16-bit unsigned integer
MAV_PARAM_TYPE_INT16 = 4 # 16-bit signed integer
MAV_PARAM_TYPE_UINT32 = 5 # 32-bit unsigned integer
MAV_PARAM_TYPE_INT32 = 6 # 32-bit signed integer
MAV_PARAM_TYPE_UINT64 = 7 # 64-bit unsigned integer
MAV_PARAM_TYPE_INT64 = 8 # 64-bit signed integer
MAV_PARAM_TYPE_REAL32 = 9 # 32-bit floating-point
MAV_PARAM_TYPE_REAL64 = 10 # 64-bit floating-point
MAV_PARAM_TYPE_ENUM_END = 11 #

# MAV_RESULT
MAV_RESULT_ACCEPTED = 0 # Command ACCEPTED and EXECUTED
MAV_RESULT_TEMPORARILY_REJECTED = 1 # Command TEMPORARY REJECTED/DENIED
MAV_RESULT_DENIED = 2 # Command PERMANENTLY DENIED
MAV_RESULT_UNSUPPORTED = 3 # Command UNKNOWN/UNSUPPORTED
MAV_RESULT_FAILED = 4 # Command executed, but failed
MAV_RESULT_ENUM_END = 5 #

# MAV_MISSION_RESULT
MAV_MISSION_ACCEPTED = 0 # mission accepted OK
MAV_MISSION_ERROR = 1 # generic error / not accepting mission commands at all right now
MAV_MISSION_UNSUPPORTED_FRAME = 2 # coordinate frame is not supported
MAV_MISSION_UNSUPPORTED = 3 # command is not supported
MAV_MISSION_NO_SPACE = 4 # mission item exceeds storage space
MAV_MISSION_INVALID = 5 # one of the parameters has an invalid value
MAV_MISSION_INVALID_PARAM1 = 6 # param1 has an invalid value
MAV_MISSION_INVALID_PARAM2 = 7 # param2 has an invalid value
MAV_MISSION_INVALID_PARAM3 = 8 # param3 has an invalid value
MAV_MISSION_INVALID_PARAM4 = 9 # param4 has an invalid value
MAV_MISSION_INVALID_PARAM5_X = 10 # x/param5 has an invalid value
MAV_MISSION_INVALID_PARAM6_Y = 11 # y/param6 has an invalid value
MAV_MISSION_INVALID_PARAM7 = 12 # param7 has an invalid value
MAV_MISSION_INVALID_SEQUENCE = 13 # received waypoint out of sequence
MAV_MISSION_DENIED = 14 # not accepting any mission commands from this communication partner
MAV_MISSION_RESULT_ENUM_END = 15 #

# MAV_SEVERITY
MAV_SEVERITY_EMERGENCY = 0 # System is unusable. This is a "panic" condition.
MAV_SEVERITY_ALERT = 1 # Action should be taken immediately. Indicates error in non-critical
                        # systems.
MAV_SEVERITY_CRITICAL = 2 # Action must be taken immediately. Indicates failure in a primary
                        # system.
MAV_SEVERITY_ERROR = 3 # Indicates an error in secondary/redundant systems.
MAV_SEVERITY_WARNING = 4 # Indicates a possible future error if this is not resolved within
                        # a given timeframe. Example would be a low
                        # battery warning.
MAV_SEVERITY_NOTICE = 5 # An unusual event has occurred, though not an error condition. This
                        # should be investigated for the root cause.
MAV_SEVERITY_INFO = 6 # Normal operational messages. Useful for logging. No action is required
                        # for these messages.
MAV_SEVERITY_DEBUG = 7 # Useful non-operational messages that can assist in debugging. These
                        # should not occur during normal operation.
MAV_SEVERITY_ENUM_END = 8 # # message IDs MAVLINK_MSG_ID_BAD_DATA = -1 MAVLINK_MSG_ID_HEARTBEAT = 0 MAVLINK_MSG_ID_SYS_STATUS = 1 MAVLINK_MSG_ID_SYSTEM_TIME = 2 MAVLINK_MSG_ID_PING = 4 MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL = 5 MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL_ACK = 6 MAVLINK_MSG_ID_AUTH_KEY = 7 MAVLINK_MSG_ID_SET_MODE = 11 MAVLINK_MSG_ID_PARAM_REQUEST_READ = 20 MAVLINK_MSG_ID_PARAM_REQUEST_LIST = 21 MAVLINK_MSG_ID_PARAM_VALUE = 22 MAVLINK_MSG_ID_PARAM_SET = 23 MAVLINK_MSG_ID_GPS_RAW_INT = 24 MAVLINK_MSG_ID_GPS_STATUS = 25 MAVLINK_MSG_ID_SCALED_IMU = 26 MAVLINK_MSG_ID_RAW_IMU = 27 MAVLINK_MSG_ID_RAW_PRESSURE = 28 MAVLINK_MSG_ID_SCALED_PRESSURE = 29 MAVLINK_MSG_ID_ATTITUDE = 30 MAVLINK_MSG_ID_ATTITUDE_QUATERNION = 31 MAVLINK_MSG_ID_LOCAL_POSITION_NED = 32 MAVLINK_MSG_ID_GLOBAL_POSITION_INT = 33 MAVLINK_MSG_ID_RC_CHANNELS_SCALED = 34 MAVLINK_MSG_ID_RC_CHANNELS_RAW = 35 MAVLINK_MSG_ID_SERVO_OUTPUT_RAW = 36 MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST = 37 MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST = 38 MAVLINK_MSG_ID_MISSION_ITEM = 39 MAVLINK_MSG_ID_MISSION_REQUEST = 40 MAVLINK_MSG_ID_MISSION_SET_CURRENT = 41 MAVLINK_MSG_ID_MISSION_CURRENT = 42 MAVLINK_MSG_ID_MISSION_REQUEST_LIST = 43 MAVLINK_MSG_ID_MISSION_COUNT = 44 MAVLINK_MSG_ID_MISSION_CLEAR_ALL = 45 MAVLINK_MSG_ID_MISSION_ITEM_REACHED = 46 MAVLINK_MSG_ID_MISSION_ACK = 47 MAVLINK_MSG_ID_SET_GPS_GLOBAL_ORIGIN = 48 MAVLINK_MSG_ID_GPS_GLOBAL_ORIGIN = 49 MAVLINK_MSG_ID_SET_LOCAL_POSITION_SETPOINT = 50 MAVLINK_MSG_ID_LOCAL_POSITION_SETPOINT = 51 MAVLINK_MSG_ID_GLOBAL_POSITION_SETPOINT_INT = 52 MAVLINK_MSG_ID_SET_GLOBAL_POSITION_SETPOINT_INT = 53 MAVLINK_MSG_ID_SAFETY_SET_ALLOWED_AREA = 54 MAVLINK_MSG_ID_SAFETY_ALLOWED_AREA = 55 MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_THRUST = 56 MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_SPEED_THRUST = 57 MAVLINK_MSG_ID_ROLL_PITCH_YAW_THRUST_SETPOINT = 58 MAVLINK_MSG_ID_ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT = 59 MAVLINK_MSG_ID_SET_QUAD_MOTORS_SETPOINT = 60 MAVLINK_MSG_ID_SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST = 61 MAVLINK_MSG_ID_NAV_CONTROLLER_OUTPUT = 62 MAVLINK_MSG_ID_SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST = 63 MAVLINK_MSG_ID_STATE_CORRECTION = 64 MAVLINK_MSG_ID_REQUEST_DATA_STREAM = 66 MAVLINK_MSG_ID_DATA_STREAM = 67 MAVLINK_MSG_ID_MANUAL_CONTROL = 69 MAVLINK_MSG_ID_RC_CHANNELS_OVERRIDE = 70 MAVLINK_MSG_ID_VFR_HUD = 74 MAVLINK_MSG_ID_COMMAND_LONG = 76 MAVLINK_MSG_ID_COMMAND_ACK = 77 MAVLINK_MSG_ID_ROLL_PITCH_YAW_RATES_THRUST_SETPOINT = 80 MAVLINK_MSG_ID_MANUAL_SETPOINT = 81 MAVLINK_MSG_ID_LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET = 89 MAVLINK_MSG_ID_HIL_STATE = 90 MAVLINK_MSG_ID_HIL_CONTROLS = 91 MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW = 92 MAVLINK_MSG_ID_OPTICAL_FLOW = 100 MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE = 101 MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE = 102 MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE = 103 MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE = 104 MAVLINK_MSG_ID_HIGHRES_IMU = 105 MAVLINK_MSG_ID_FILE_TRANSFER_START = 110 MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST = 111 MAVLINK_MSG_ID_FILE_TRANSFER_RES = 112 MAVLINK_MSG_ID_BATTERY_STATUS = 147 MAVLINK_MSG_ID_SETPOINT_8DOF = 148 MAVLINK_MSG_ID_SETPOINT_6DOF = 149 MAVLINK_MSG_ID_MEMORY_VECT = 249 MAVLINK_MSG_ID_DEBUG_VECT = 250 MAVLINK_MSG_ID_NAMED_VALUE_FLOAT = 251 MAVLINK_MSG_ID_NAMED_VALUE_INT = 252 MAVLINK_MSG_ID_STATUSTEXT = 253 MAVLINK_MSG_ID_DEBUG = 254 class MAVLink_heartbeat_message(MAVLink_message): ''' The heartbeat message shows that a system is present and responding. 
The type of the MAV and Autopilot hardware allow the
        receiving system to treat further messages from this system
        appropriately (e.g. by laying out the user interface based on the
        autopilot).
        '''
        def __init__(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version):
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_HEARTBEAT, 'HEARTBEAT')
                self._fieldnames = ['type', 'autopilot', 'base_mode', 'custom_mode', 'system_status', 'mavlink_version']
                self.type = type
                self.autopilot = autopilot
                self.base_mode = base_mode
                self.custom_mode = custom_mode
                self.system_status = system_status
                self.mavlink_version = mavlink_version

        def pack(self, mav):
                return MAVLink_message.pack(self, mav, 50, struct.pack('<IBBBBB', self.custom_mode, self.type, self.autopilot, self.base_mode, self.system_status, self.mavlink_version))

class MAVLink_sys_status_message(MAVLink_message):
        '''
        The general system state. If the system is following the MAVLink
        standard, the system state is mainly defined by three orthogonal
        states/modes: The system mode, which is either LOCKED (motors shut
        down and locked), MANUAL (system under RC control), GUIDED (system
        with autonomous position control, position setpoint controlled
        manually) or AUTO (system guided by path/waypoint planner). The
        NAV_MODE defines the current flight state: LIFTOFF (often an open-
        loop maneuver), LANDING, WAYPOINTS or VECTOR. This represents the
        internal navigation state machine. The system status shows whether
        the system is currently active or not and if an emergency occurred.
        During the CRITICAL and EMERGENCY states the MAV is still
        considered to be active, but should start emergency procedures
        autonomously. After a failure occurred it should first move from
        active to critical to allow manual intervention and then move to
        emergency after a certain timeout.
''' def __init__(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SYS_STATUS, 'SYS_STATUS') self._fieldnames = ['onboard_control_sensors_present', 'onboard_control_sensors_enabled', 'onboard_control_sensors_health', 'load', 'voltage_battery', 'current_battery', 'battery_remaining', 'drop_rate_comm', 'errors_comm', 'errors_count1', 'errors_count2', 'errors_count3', 'errors_count4'] self.onboard_control_sensors_present = onboard_control_sensors_present self.onboard_control_sensors_enabled = onboard_control_sensors_enabled self.onboard_control_sensors_health = onboard_control_sensors_health self.load = load self.voltage_battery = voltage_battery self.current_battery = current_battery self.battery_remaining = battery_remaining self.drop_rate_comm = drop_rate_comm self.errors_comm = errors_comm self.errors_count1 = errors_count1 self.errors_count2 = errors_count2 self.errors_count3 = errors_count3 self.errors_count4 = errors_count4 def pack(self, mav): return MAVLink_message.pack(self, mav, 124, struct.pack('<IIIHHhHHHHHHb', self.onboard_control_sensors_present, self.onboard_control_sensors_enabled, self.onboard_control_sensors_health, self.load, self.voltage_battery, self.current_battery, self.drop_rate_comm, self.errors_comm, self.errors_count1, self.errors_count2, self.errors_count3, self.errors_count4, self.battery_remaining)) class MAVLink_system_time_message(MAVLink_message): ''' The system time is the time of the master clock, typically the computer clock of the main onboard computer. ''' def __init__(self, time_unix_usec, time_boot_ms): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SYSTEM_TIME, 'SYSTEM_TIME') self._fieldnames = ['time_unix_usec', 'time_boot_ms'] self.time_unix_usec = time_unix_usec self.time_boot_ms = time_boot_ms def pack(self, mav): return MAVLink_message.pack(self, mav, 137, struct.pack('<QI', self.time_unix_usec, self.time_boot_ms)) class MAVLink_ping_message(MAVLink_message): ''' A ping message either requesting or responding to a ping. This allows to measure the system latencies, including serial port, radio modem and UDP connections. 
''' def __init__(self, time_usec, seq, target_system, target_component): MAVLink_message.__init__(self, MAVLINK_MSG_ID_PING, 'PING') self._fieldnames = ['time_usec', 'seq', 'target_system', 'target_component'] self.time_usec = time_usec self.seq = seq self.target_system = target_system self.target_component = target_component def pack(self, mav): return MAVLink_message.pack(self, mav, 237, struct.pack('<QIBB', self.time_usec, self.seq, self.target_system, self.target_component)) class MAVLink_change_operator_control_message(MAVLink_message): ''' Request to control this MAV ''' def __init__(self, target_system, control_request, version, passkey): MAVLink_message.__init__(self, MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL, 'CHANGE_OPERATOR_CONTROL') self._fieldnames = ['target_system', 'control_request', 'version', 'passkey'] self.target_system = target_system self.control_request = control_request self.version = version self.passkey = passkey def pack(self, mav): return MAVLink_message.pack(self, mav, 217, struct.pack('<BBB25s', self.target_system, self.control_request, self.version, self.passkey)) class MAVLink_change_operator_control_ack_message(MAVLink_message): ''' Accept / deny control of this MAV ''' def __init__(self, gcs_system_id, control_request, ack): MAVLink_message.__init__(self, MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL_ACK, 'CHANGE_OPERATOR_CONTROL_ACK') self._fieldnames = ['gcs_system_id', 'control_request', 'ack'] self.gcs_system_id = gcs_system_id self.control_request = control_request self.ack = ack def pack(self, mav): return MAVLink_message.pack(self, mav, 104, struct.pack('<BBB', self.gcs_system_id, self.control_request, self.ack)) class MAVLink_auth_key_message(MAVLink_message): ''' Emit an encrypted signature / key identifying this system. PLEASE NOTE: This protocol has been kept simple, so transmitting the key requires an encrypted channel for true safety. ''' def __init__(self, key): MAVLink_message.__init__(self, MAVLINK_MSG_ID_AUTH_KEY, 'AUTH_KEY') self._fieldnames = ['key'] self.key = key def pack(self, mav): return MAVLink_message.pack(self, mav, 119, struct.pack('<32s', self.key)) class MAVLink_set_mode_message(MAVLink_message): ''' Set the system mode, as defined by enum MAV_MODE. There is no target component id as the mode is by definition for the overall aircraft, not only for one component. ''' def __init__(self, target_system, base_mode, custom_mode): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_MODE, 'SET_MODE') self._fieldnames = ['target_system', 'base_mode', 'custom_mode'] self.target_system = target_system self.base_mode = base_mode self.custom_mode = custom_mode def pack(self, mav): return MAVLink_message.pack(self, mav, 89, struct.pack('<IBB', self.custom_mode, self.target_system, self.base_mode)) class MAVLink_param_request_read_message(MAVLink_message): ''' Request to read the onboard parameter with the param_id string id. Onboard parameters are stored as key[const char*] -> value[float]. This allows to send a parameter to any other component (such as the GCS) without the need of previous knowledge of possible parameter names. Thus the same GCS can store different parameters for different autopilots. See also http://qgroundcontrol.org/parameter_interface for a full documentation of QGroundControl and IMU code. 
        '''
        def __init__(self, target_system, target_component, param_id, param_index):
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_REQUEST_READ, 'PARAM_REQUEST_READ')
                self._fieldnames = ['target_system', 'target_component', 'param_id', 'param_index']
                self.target_system = target_system
                self.target_component = target_component
                self.param_id = param_id
                self.param_index = param_index

        def pack(self, mav):
                return MAVLink_message.pack(self, mav, 214, struct.pack('<hBB16s', self.param_index, self.target_system, self.target_component, self.param_id))

class MAVLink_param_request_list_message(MAVLink_message):
        '''
        Request all parameters of this component. After this request, all
        parameters are emitted.
        '''
        def __init__(self, target_system, target_component):
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_REQUEST_LIST, 'PARAM_REQUEST_LIST')
                self._fieldnames = ['target_system', 'target_component']
                self.target_system = target_system
                self.target_component = target_component

        def pack(self, mav):
                return MAVLink_message.pack(self, mav, 159, struct.pack('<BB', self.target_system, self.target_component))

class MAVLink_param_value_message(MAVLink_message):
        '''
        Emit the value of an onboard parameter. The inclusion of
        param_count and param_index in the message allows the recipient to
        keep track of received parameters and allows it to re-request
        missing parameters after a loss or timeout.
        '''
        def __init__(self, param_id, param_value, param_type, param_count, param_index):
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_VALUE, 'PARAM_VALUE')
                self._fieldnames = ['param_id', 'param_value', 'param_type', 'param_count', 'param_index']
                self.param_id = param_id
                self.param_value = param_value
                self.param_type = param_type
                self.param_count = param_count
                self.param_index = param_index

        def pack(self, mav):
                return MAVLink_message.pack(self, mav, 220, struct.pack('<fHH16sB', self.param_value, self.param_count, self.param_index, self.param_id, self.param_type))

class MAVLink_param_set_message(MAVLink_message):
        '''
        Set a parameter value TEMPORARILY to RAM. It will be reset to
        default on system reboot. Send the ACTION MAV_ACTION_STORAGE_WRITE
        to PERMANENTLY write the RAM contents to EEPROM. IMPORTANT: The
        receiving component should acknowledge the new parameter value by
        sending a param_value message to all communication partners. This
        will also ensure that multiple GCS all have an up-to-date list of
        all parameters. If the sending GCS did not receive a PARAM_VALUE
        message within its timeout time, it should re-send the PARAM_SET
        message.
        '''
        def __init__(self, target_system, target_component, param_id, param_value, param_type):
                MAVLink_message.__init__(self, MAVLINK_MSG_ID_PARAM_SET, 'PARAM_SET')
                self._fieldnames = ['target_system', 'target_component', 'param_id', 'param_value', 'param_type']
                self.target_system = target_system
                self.target_component = target_component
                self.param_id = param_id
                self.param_value = param_value
                self.param_type = param_type

        def pack(self, mav):
                return MAVLink_message.pack(self, mav, 168, struct.pack('<fBB16sB', self.param_value, self.target_system, self.target_component, self.param_id, self.param_type))

class MAVLink_gps_raw_int_message(MAVLink_message):
        '''
        The global position, as returned by the Global Positioning System
        (GPS). This is NOT the global position estimate of the system, but
        rather a RAW sensor value. See message GLOBAL_POSITION for the
        global position estimate. Coordinate frame is right-handed, Z-axis
        up (GPS frame).
''' def __init__(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible): MAVLink_message.__init__(self, MAVLINK_MSG_ID_GPS_RAW_INT, 'GPS_RAW_INT') self._fieldnames = ['time_usec', 'fix_type', 'lat', 'lon', 'alt', 'eph', 'epv', 'vel', 'cog', 'satellites_visible'] self.time_usec = time_usec self.fix_type = fix_type self.lat = lat self.lon = lon self.alt = alt self.eph = eph self.epv = epv self.vel = vel self.cog = cog self.satellites_visible = satellites_visible def pack(self, mav): return MAVLink_message.pack(self, mav, 24, struct.pack('<QiiiHHHHBB', self.time_usec, self.lat, self.lon, self.alt, self.eph, self.epv, self.vel, self.cog, self.fix_type, self.satellites_visible)) class MAVLink_gps_status_message(MAVLink_message): ''' The positioning status, as reported by GPS. This message is intended to display status information about each satellite visible to the receiver. See message GLOBAL_POSITION for the global position estimate. This message can contain information for up to 20 satellites. ''' def __init__(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr): MAVLink_message.__init__(self, MAVLINK_MSG_ID_GPS_STATUS, 'GPS_STATUS') self._fieldnames = ['satellites_visible', 'satellite_prn', 'satellite_used', 'satellite_elevation', 'satellite_azimuth', 'satellite_snr'] self.satellites_visible = satellites_visible self.satellite_prn = satellite_prn self.satellite_used = satellite_used self.satellite_elevation = satellite_elevation self.satellite_azimuth = satellite_azimuth self.satellite_snr = satellite_snr def pack(self, mav): return MAVLink_message.pack(self, mav, 23, struct.pack('<B20s20s20s20s20s', self.satellites_visible, self.satellite_prn, self.satellite_used, self.satellite_elevation, self.satellite_azimuth, self.satellite_snr)) class MAVLink_scaled_imu_message(MAVLink_message): ''' The RAW IMU readings for the usual 9DOF sensor setup. This message should contain the scaled values to the described units ''' def __init__(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SCALED_IMU, 'SCALED_IMU') self._fieldnames = ['time_boot_ms', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag'] self.time_boot_ms = time_boot_ms self.xacc = xacc self.yacc = yacc self.zacc = zacc self.xgyro = xgyro self.ygyro = ygyro self.zgyro = zgyro self.xmag = xmag self.ymag = ymag self.zmag = zmag def pack(self, mav): return MAVLink_message.pack(self, mav, 170, struct.pack('<Ihhhhhhhhh', self.time_boot_ms, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag)) class MAVLink_raw_imu_message(MAVLink_message): ''' The RAW IMU readings for the usual 9DOF sensor setup. This message should always contain the true raw values without any scaling to allow data capture and system debugging. 
''' def __init__(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): MAVLink_message.__init__(self, MAVLINK_MSG_ID_RAW_IMU, 'RAW_IMU') self._fieldnames = ['time_usec', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag'] self.time_usec = time_usec self.xacc = xacc self.yacc = yacc self.zacc = zacc self.xgyro = xgyro self.ygyro = ygyro self.zgyro = zgyro self.xmag = xmag self.ymag = ymag self.zmag = zmag def pack(self, mav): return MAVLink_message.pack(self, mav, 144, struct.pack('<Qhhhhhhhhh', self.time_usec, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag)) class MAVLink_raw_pressure_message(MAVLink_message): ''' The RAW pressure readings for the typical setup of one absolute pressure and one differential pressure sensor. The sensor values should be the raw, UNSCALED ADC values. ''' def __init__(self, time_usec, press_abs, press_diff1, press_diff2, temperature): MAVLink_message.__init__(self, MAVLINK_MSG_ID_RAW_PRESSURE, 'RAW_PRESSURE') self._fieldnames = ['time_usec', 'press_abs', 'press_diff1', 'press_diff2', 'temperature'] self.time_usec = time_usec self.press_abs = press_abs self.press_diff1 = press_diff1 self.press_diff2 = press_diff2 self.temperature = temperature def pack(self, mav): return MAVLink_message.pack(self, mav, 67, struct.pack('<Qhhhh', self.time_usec, self.press_abs, self.press_diff1, self.press_diff2, self.temperature)) class MAVLink_scaled_pressure_message(MAVLink_message): ''' The pressure readings for the typical setup of one absolute and differential pressure sensor. The units are as specified in each field. ''' def __init__(self, time_boot_ms, press_abs, press_diff, temperature): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SCALED_PRESSURE, 'SCALED_PRESSURE') self._fieldnames = ['time_boot_ms', 'press_abs', 'press_diff', 'temperature'] self.time_boot_ms = time_boot_ms self.press_abs = press_abs self.press_diff = press_diff self.temperature = temperature def pack(self, mav): return MAVLink_message.pack(self, mav, 115, struct.pack('<Iffh', self.time_boot_ms, self.press_abs, self.press_diff, self.temperature)) class MAVLink_attitude_message(MAVLink_message): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right). ''' def __init__(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed): MAVLink_message.__init__(self, MAVLINK_MSG_ID_ATTITUDE, 'ATTITUDE') self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'rollspeed', 'pitchspeed', 'yawspeed'] self.time_boot_ms = time_boot_ms self.roll = roll self.pitch = pitch self.yaw = yaw self.rollspeed = rollspeed self.pitchspeed = pitchspeed self.yawspeed = yawspeed def pack(self, mav): return MAVLink_message.pack(self, mav, 39, struct.pack('<Iffffff', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.rollspeed, self.pitchspeed, self.yawspeed)) class MAVLink_attitude_quaternion_message(MAVLink_message): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right), expressed as quaternion. 
''' def __init__(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed): MAVLink_message.__init__(self, MAVLINK_MSG_ID_ATTITUDE_QUATERNION, 'ATTITUDE_QUATERNION') self._fieldnames = ['time_boot_ms', 'q1', 'q2', 'q3', 'q4', 'rollspeed', 'pitchspeed', 'yawspeed'] self.time_boot_ms = time_boot_ms self.q1 = q1 self.q2 = q2 self.q3 = q3 self.q4 = q4 self.rollspeed = rollspeed self.pitchspeed = pitchspeed self.yawspeed = yawspeed def pack(self, mav): return MAVLink_message.pack(self, mav, 246, struct.pack('<Ifffffff', self.time_boot_ms, self.q1, self.q2, self.q3, self.q4, self.rollspeed, self.pitchspeed, self.yawspeed)) class MAVLink_local_position_ned_message(MAVLink_message): ''' The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) ''' def __init__(self, time_boot_ms, x, y, z, vx, vy, vz): MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_NED, 'LOCAL_POSITION_NED') self._fieldnames = ['time_boot_ms', 'x', 'y', 'z', 'vx', 'vy', 'vz'] self.time_boot_ms = time_boot_ms self.x = x self.y = y self.z = z self.vx = vx self.vy = vy self.vz = vz def pack(self, mav): return MAVLink_message.pack(self, mav, 185, struct.pack('<Iffffff', self.time_boot_ms, self.x, self.y, self.z, self.vx, self.vy, self.vz)) class MAVLink_global_position_int_message(MAVLink_message): ''' The filtered global position (e.g. fused GPS and accelerometers). The position is in GPS-frame (right-handed, Z-up). It is designed as scaled integer message since the resolution of float is not sufficient. ''' def __init__(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg): MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_POSITION_INT, 'GLOBAL_POSITION_INT') self._fieldnames = ['time_boot_ms', 'lat', 'lon', 'alt', 'relative_alt', 'vx', 'vy', 'vz', 'hdg'] self.time_boot_ms = time_boot_ms self.lat = lat self.lon = lon self.alt = alt self.relative_alt = relative_alt self.vx = vx self.vy = vy self.vz = vz self.hdg = hdg def pack(self, mav): return MAVLink_message.pack(self, mav, 104, struct.pack('<IiiiihhhH', self.time_boot_ms, self.lat, self.lon, self.alt, self.relative_alt, self.vx, self.vy, self.vz, self.hdg)) class MAVLink_rc_channels_scaled_message(MAVLink_message): ''' The scaled values of the RC channels received. (-100%) -10000, (0%) 0, (100%) 10000. Channels that are inactive should be set to 65535. 
''' def __init__(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi): MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_SCALED, 'RC_CHANNELS_SCALED') self._fieldnames = ['time_boot_ms', 'port', 'chan1_scaled', 'chan2_scaled', 'chan3_scaled', 'chan4_scaled', 'chan5_scaled', 'chan6_scaled', 'chan7_scaled', 'chan8_scaled', 'rssi'] self.time_boot_ms = time_boot_ms self.port = port self.chan1_scaled = chan1_scaled self.chan2_scaled = chan2_scaled self.chan3_scaled = chan3_scaled self.chan4_scaled = chan4_scaled self.chan5_scaled = chan5_scaled self.chan6_scaled = chan6_scaled self.chan7_scaled = chan7_scaled self.chan8_scaled = chan8_scaled self.rssi = rssi def pack(self, mav): return MAVLink_message.pack(self, mav, 237, struct.pack('<IhhhhhhhhBB', self.time_boot_ms, self.chan1_scaled, self.chan2_scaled, self.chan3_scaled, self.chan4_scaled, self.chan5_scaled, self.chan6_scaled, self.chan7_scaled, self.chan8_scaled, self.port, self.rssi)) class MAVLink_rc_channels_raw_message(MAVLink_message): ''' The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. ''' def __init__(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi): MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_RAW, 'RC_CHANNELS_RAW') self._fieldnames = ['time_boot_ms', 'port', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw', 'rssi'] self.time_boot_ms = time_boot_ms self.port = port self.chan1_raw = chan1_raw self.chan2_raw = chan2_raw self.chan3_raw = chan3_raw self.chan4_raw = chan4_raw self.chan5_raw = chan5_raw self.chan6_raw = chan6_raw self.chan7_raw = chan7_raw self.chan8_raw = chan8_raw self.rssi = rssi def pack(self, mav): return MAVLink_message.pack(self, mav, 244, struct.pack('<IHHHHHHHHBB', self.time_boot_ms, self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.port, self.rssi)) class MAVLink_servo_output_raw_message(MAVLink_message): ''' The RAW values of the servo outputs (for RC input from the remote, use the RC_CHANNELS messages). The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. ''' def __init__(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SERVO_OUTPUT_RAW, 'SERVO_OUTPUT_RAW') self._fieldnames = ['time_boot_ms', 'port', 'servo1_raw', 'servo2_raw', 'servo3_raw', 'servo4_raw', 'servo5_raw', 'servo6_raw', 'servo7_raw', 'servo8_raw'] self.time_boot_ms = time_boot_ms self.port = port self.servo1_raw = servo1_raw self.servo2_raw = servo2_raw self.servo3_raw = servo3_raw self.servo4_raw = servo4_raw self.servo5_raw = servo5_raw self.servo6_raw = servo6_raw self.servo7_raw = servo7_raw self.servo8_raw = servo8_raw def pack(self, mav): return MAVLink_message.pack(self, mav, 242, struct.pack('<IHHHHHHHHB', self.time_boot_ms, self.servo1_raw, self.servo2_raw, self.servo3_raw, self.servo4_raw, self.servo5_raw, self.servo6_raw, self.servo7_raw, self.servo8_raw, self.port)) class MAVLink_mission_request_partial_list_message(MAVLink_message): ''' Request a partial list of mission items from the system/component. 
http://qgroundcontrol.org/mavlink/waypoint_protocol. If start and end index are the same, just send one waypoint. ''' def __init__(self, target_system, target_component, start_index, end_index): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST, 'MISSION_REQUEST_PARTIAL_LIST') self._fieldnames = ['target_system', 'target_component', 'start_index', 'end_index'] self.target_system = target_system self.target_component = target_component self.start_index = start_index self.end_index = end_index def pack(self, mav): return MAVLink_message.pack(self, mav, 212, struct.pack('<hhBB', self.start_index, self.end_index, self.target_system, self.target_component)) class MAVLink_mission_write_partial_list_message(MAVLink_message): ''' This message is sent to the MAV to write a partial list. If start index == end index, only one item will be transmitted / updated. If the start index is NOT 0 and above the current list size, this request should be REJECTED! ''' def __init__(self, target_system, target_component, start_index, end_index): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST, 'MISSION_WRITE_PARTIAL_LIST') self._fieldnames = ['target_system', 'target_component', 'start_index', 'end_index'] self.target_system = target_system self.target_component = target_component self.start_index = start_index self.end_index = end_index def pack(self, mav): return MAVLink_message.pack(self, mav, 9, struct.pack('<hhBB', self.start_index, self.end_index, self.target_system, self.target_component)) class MAVLink_mission_item_message(MAVLink_message): ''' Message encoding a mission item. This message is emitted to announce the presence of a mission item and to set a mission item on the system. The mission item can be either in x, y, z meters (type: LOCAL) or x:lat, y:lon, z:altitude. Local frame is Z-down, right handed (NED), global frame is Z-up, right handed (ENU). See also http://qgroundcontrol.org/mavlink/waypoint_protocol. ''' def __init__(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ITEM, 'MISSION_ITEM') self._fieldnames = ['target_system', 'target_component', 'seq', 'frame', 'command', 'current', 'autocontinue', 'param1', 'param2', 'param3', 'param4', 'x', 'y', 'z'] self.target_system = target_system self.target_component = target_component self.seq = seq self.frame = frame self.command = command self.current = current self.autocontinue = autocontinue self.param1 = param1 self.param2 = param2 self.param3 = param3 self.param4 = param4 self.x = x self.y = y self.z = z def pack(self, mav): return MAVLink_message.pack(self, mav, 254, struct.pack('<fffffffHHBBBBB', self.param1, self.param2, self.param3, self.param4, self.x, self.y, self.z, self.seq, self.command, self.target_system, self.target_component, self.frame, self.current, self.autocontinue)) class MAVLink_mission_request_message(MAVLink_message): ''' Request the information of the mission item with the sequence number seq. The response of the system to this message should be a MISSION_ITEM message. 
http://qgroundcontrol.org/mavlink/waypoint_protocol ''' def __init__(self, target_system, target_component, seq): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST, 'MISSION_REQUEST') self._fieldnames = ['target_system', 'target_component', 'seq'] self.target_system = target_system self.target_component = target_component self.seq = seq def pack(self, mav): return MAVLink_message.pack(self, mav, 230, struct.pack('<HBB', self.seq, self.target_system, self.target_component)) class MAVLink_mission_set_current_message(MAVLink_message): ''' Set the mission item with sequence number seq as current item. This means that the MAV will continue to this mission item on the shortest path (not following the mission items in- between). ''' def __init__(self, target_system, target_component, seq): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_SET_CURRENT, 'MISSION_SET_CURRENT') self._fieldnames = ['target_system', 'target_component', 'seq'] self.target_system = target_system self.target_component = target_component self.seq = seq def pack(self, mav): return MAVLink_message.pack(self, mav, 28, struct.pack('<HBB', self.seq, self.target_system, self.target_component)) class MAVLink_mission_current_message(MAVLink_message): ''' Message that announces the sequence number of the current active mission item. The MAV will fly towards this mission item. ''' def __init__(self, seq): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_CURRENT, 'MISSION_CURRENT') self._fieldnames = ['seq'] self.seq = seq def pack(self, mav): return MAVLink_message.pack(self, mav, 28, struct.pack('<H', self.seq)) class MAVLink_mission_request_list_message(MAVLink_message): ''' Request the overall list of mission items from the system/component. ''' def __init__(self, target_system, target_component): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_REQUEST_LIST, 'MISSION_REQUEST_LIST') self._fieldnames = ['target_system', 'target_component'] self.target_system = target_system self.target_component = target_component def pack(self, mav): return MAVLink_message.pack(self, mav, 132, struct.pack('<BB', self.target_system, self.target_component)) class MAVLink_mission_count_message(MAVLink_message): ''' This message is emitted as response to MISSION_REQUEST_LIST by the MAV and to initiate a write transaction. The GCS can then request the individual mission item based on the knowledge of the total number of MISSIONs. ''' def __init__(self, target_system, target_component, count): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_COUNT, 'MISSION_COUNT') self._fieldnames = ['target_system', 'target_component', 'count'] self.target_system = target_system self.target_component = target_component self.count = count def pack(self, mav): return MAVLink_message.pack(self, mav, 221, struct.pack('<HBB', self.count, self.target_system, self.target_component)) class MAVLink_mission_clear_all_message(MAVLink_message): ''' Delete all mission items at once. ''' def __init__(self, target_system, target_component): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_CLEAR_ALL, 'MISSION_CLEAR_ALL') self._fieldnames = ['target_system', 'target_component'] self.target_system = target_system self.target_component = target_component def pack(self, mav): return MAVLink_message.pack(self, mav, 232, struct.pack('<BB', self.target_system, self.target_component)) class MAVLink_mission_item_reached_message(MAVLink_message): ''' A certain mission item has been reached. 
The system will either hold this position (or circle on the orbit) or (if the autocontinue on the WP was set) continue to the next MISSION. ''' def __init__(self, seq): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ITEM_REACHED, 'MISSION_ITEM_REACHED') self._fieldnames = ['seq'] self.seq = seq def pack(self, mav): return MAVLink_message.pack(self, mav, 11, struct.pack('<H', self.seq)) class MAVLink_mission_ack_message(MAVLink_message): ''' Ack message during MISSION handling. The type field states if this message is a positive ack (type=0) or if an error happened (type=non-zero). ''' def __init__(self, target_system, target_component, type): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MISSION_ACK, 'MISSION_ACK') self._fieldnames = ['target_system', 'target_component', 'type'] self.target_system = target_system self.target_component = target_component self.type = type def pack(self, mav): return MAVLink_message.pack(self, mav, 153, struct.pack('<BBB', self.target_system, self.target_component, self.type)) class MAVLink_set_gps_global_origin_message(MAVLink_message): ''' As local waypoints exist, the global MISSION reference allows to transform between the local coordinate frame and the global (GPS) coordinate frame. This can be necessary when e.g. in- and outdoor settings are connected and the MAV should move from in- to outdoor. ''' def __init__(self, target_system, latitude, longitude, altitude): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_GPS_GLOBAL_ORIGIN, 'SET_GPS_GLOBAL_ORIGIN') self._fieldnames = ['target_system', 'latitude', 'longitude', 'altitude'] self.target_system = target_system self.latitude = latitude self.longitude = longitude self.altitude = altitude def pack(self, mav): return MAVLink_message.pack(self, mav, 41, struct.pack('<iiiB', self.latitude, self.longitude, self.altitude, self.target_system)) class MAVLink_gps_global_origin_message(MAVLink_message): ''' Once the MAV sets a new GPS-Local correspondence, this message announces the origin (0,0,0) position ''' def __init__(self, latitude, longitude, altitude): MAVLink_message.__init__(self, MAVLINK_MSG_ID_GPS_GLOBAL_ORIGIN, 'GPS_GLOBAL_ORIGIN') self._fieldnames = ['latitude', 'longitude', 'altitude'] self.latitude = latitude self.longitude = longitude self.altitude = altitude def pack(self, mav): return MAVLink_message.pack(self, mav, 39, struct.pack('<iii', self.latitude, self.longitude, self.altitude)) class MAVLink_set_local_position_setpoint_message(MAVLink_message): ''' Set the setpoint for a local position controller. This is the position in local coordinates the MAV should fly to. This message is sent by the path/MISSION planner to the onboard position controller. As some MAVs have a degree of freedom in yaw (e.g. all helicopters/quadrotors), the desired yaw angle is part of the message. 
''' def __init__(self, target_system, target_component, coordinate_frame, x, y, z, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_LOCAL_POSITION_SETPOINT, 'SET_LOCAL_POSITION_SETPOINT') self._fieldnames = ['target_system', 'target_component', 'coordinate_frame', 'x', 'y', 'z', 'yaw'] self.target_system = target_system self.target_component = target_component self.coordinate_frame = coordinate_frame self.x = x self.y = y self.z = z self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 214, struct.pack('<ffffBBB', self.x, self.y, self.z, self.yaw, self.target_system, self.target_component, self.coordinate_frame)) class MAVLink_local_position_setpoint_message(MAVLink_message): ''' Transmit the current local setpoint of the controller to other MAVs (collision avoidance) and to the GCS. ''' def __init__(self, coordinate_frame, x, y, z, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_SETPOINT, 'LOCAL_POSITION_SETPOINT') self._fieldnames = ['coordinate_frame', 'x', 'y', 'z', 'yaw'] self.coordinate_frame = coordinate_frame self.x = x self.y = y self.z = z self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 223, struct.pack('<ffffB', self.x, self.y, self.z, self.yaw, self.coordinate_frame)) class MAVLink_global_position_setpoint_int_message(MAVLink_message): ''' Transmit the current local setpoint of the controller to other MAVs (collision avoidance) and to the GCS. ''' def __init__(self, coordinate_frame, latitude, longitude, altitude, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_POSITION_SETPOINT_INT, 'GLOBAL_POSITION_SETPOINT_INT') self._fieldnames = ['coordinate_frame', 'latitude', 'longitude', 'altitude', 'yaw'] self.coordinate_frame = coordinate_frame self.latitude = latitude self.longitude = longitude self.altitude = altitude self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 141, struct.pack('<iiihB', self.latitude, self.longitude, self.altitude, self.yaw, self.coordinate_frame)) class MAVLink_set_global_position_setpoint_int_message(MAVLink_message): ''' Set the current global position setpoint. ''' def __init__(self, coordinate_frame, latitude, longitude, altitude, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_GLOBAL_POSITION_SETPOINT_INT, 'SET_GLOBAL_POSITION_SETPOINT_INT') self._fieldnames = ['coordinate_frame', 'latitude', 'longitude', 'altitude', 'yaw'] self.coordinate_frame = coordinate_frame self.latitude = latitude self.longitude = longitude self.altitude = altitude self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 33, struct.pack('<iiihB', self.latitude, self.longitude, self.altitude, self.yaw, self.coordinate_frame)) class MAVLink_safety_set_allowed_area_message(MAVLink_message): ''' Set a safety zone (volume), which is defined by two corners of a cube. This message can be used to tell the MAV which setpoints/MISSIONs to accept and which to reject. Safety areas are often enforced by national or competition regulations. 
''' def __init__(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SAFETY_SET_ALLOWED_AREA, 'SAFETY_SET_ALLOWED_AREA') self._fieldnames = ['target_system', 'target_component', 'frame', 'p1x', 'p1y', 'p1z', 'p2x', 'p2y', 'p2z'] self.target_system = target_system self.target_component = target_component self.frame = frame self.p1x = p1x self.p1y = p1y self.p1z = p1z self.p2x = p2x self.p2y = p2y self.p2z = p2z def pack(self, mav): return MAVLink_message.pack(self, mav, 15, struct.pack('<ffffffBBB', self.p1x, self.p1y, self.p1z, self.p2x, self.p2y, self.p2z, self.target_system, self.target_component, self.frame)) class MAVLink_safety_allowed_area_message(MAVLink_message): ''' Read out the safety zone the MAV currently assumes. ''' def __init__(self, frame, p1x, p1y, p1z, p2x, p2y, p2z): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SAFETY_ALLOWED_AREA, 'SAFETY_ALLOWED_AREA') self._fieldnames = ['frame', 'p1x', 'p1y', 'p1z', 'p2x', 'p2y', 'p2z'] self.frame = frame self.p1x = p1x self.p1y = p1y self.p1z = p1z self.p2x = p2x self.p2y = p2y self.p2z = p2z def pack(self, mav): return MAVLink_message.pack(self, mav, 3, struct.pack('<ffffffB', self.p1x, self.p1y, self.p1z, self.p2x, self.p2y, self.p2z, self.frame)) class MAVLink_set_roll_pitch_yaw_thrust_message(MAVLink_message): ''' Set roll, pitch and yaw. ''' def __init__(self, target_system, target_component, roll, pitch, yaw, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_THRUST, 'SET_ROLL_PITCH_YAW_THRUST') self._fieldnames = ['target_system', 'target_component', 'roll', 'pitch', 'yaw', 'thrust'] self.target_system = target_system self.target_component = target_component self.roll = roll self.pitch = pitch self.yaw = yaw self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 100, struct.pack('<ffffBB', self.roll, self.pitch, self.yaw, self.thrust, self.target_system, self.target_component)) class MAVLink_set_roll_pitch_yaw_speed_thrust_message(MAVLink_message): ''' Set roll, pitch and yaw. ''' def __init__(self, target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_SPEED_THRUST, 'SET_ROLL_PITCH_YAW_SPEED_THRUST') self._fieldnames = ['target_system', 'target_component', 'roll_speed', 'pitch_speed', 'yaw_speed', 'thrust'] self.target_system = target_system self.target_component = target_component self.roll_speed = roll_speed self.pitch_speed = pitch_speed self.yaw_speed = yaw_speed self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 24, struct.pack('<ffffBB', self.roll_speed, self.pitch_speed, self.yaw_speed, self.thrust, self.target_system, self.target_component)) class MAVLink_roll_pitch_yaw_thrust_setpoint_message(MAVLink_message): ''' Setpoint in roll, pitch, yaw currently active on the system. 
''' def __init__(self, time_boot_ms, roll, pitch, yaw, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_ROLL_PITCH_YAW_THRUST_SETPOINT, 'ROLL_PITCH_YAW_THRUST_SETPOINT') self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'thrust'] self.time_boot_ms = time_boot_ms self.roll = roll self.pitch = pitch self.yaw = yaw self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 239, struct.pack('<Iffff', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.thrust)) class MAVLink_roll_pitch_yaw_speed_thrust_setpoint_message(MAVLink_message): ''' Setpoint in rollspeed, pitchspeed, yawspeed currently active on the system. ''' def __init__(self, time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT, 'ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT') self._fieldnames = ['time_boot_ms', 'roll_speed', 'pitch_speed', 'yaw_speed', 'thrust'] self.time_boot_ms = time_boot_ms self.roll_speed = roll_speed self.pitch_speed = pitch_speed self.yaw_speed = yaw_speed self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 238, struct.pack('<Iffff', self.time_boot_ms, self.roll_speed, self.pitch_speed, self.yaw_speed, self.thrust)) class MAVLink_set_quad_motors_setpoint_message(MAVLink_message): ''' Setpoint in the four motor speeds ''' def __init__(self, target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_QUAD_MOTORS_SETPOINT, 'SET_QUAD_MOTORS_SETPOINT') self._fieldnames = ['target_system', 'motor_front_nw', 'motor_right_ne', 'motor_back_se', 'motor_left_sw'] self.target_system = target_system self.motor_front_nw = motor_front_nw self.motor_right_ne = motor_right_ne self.motor_back_se = motor_back_se self.motor_left_sw = motor_left_sw def pack(self, mav): return MAVLink_message.pack(self, mav, 30, struct.pack('<HHHHB', self.motor_front_nw, self.motor_right_ne, self.motor_back_se, self.motor_left_sw, self.target_system)) class MAVLink_set_quad_swarm_roll_pitch_yaw_thrust_message(MAVLink_message): ''' Setpoint for up to four quadrotors in a group / wing ''' def __init__(self, group, mode, roll, pitch, yaw, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST, 'SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST') self._fieldnames = ['group', 'mode', 'roll', 'pitch', 'yaw', 'thrust'] self.group = group self.mode = mode self.roll = roll self.pitch = pitch self.yaw = yaw self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 240, struct.pack('<4h4h4h4HBB', self.roll, self.pitch, self.yaw, self.thrust, self.group, self.mode)) class MAVLink_nav_controller_output_message(MAVLink_message): ''' Outputs of the APM navigation controller. The primary use of this message is to check the response and signs of the controller before actual flight and to assist with tuning controller parameters. 
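        A pre-flight check might watch the error terms of a decoded message
        `m` of this type (illustrative only; the thresholds are arbitrary
        placeholders, not part of the protocol):

            assert abs(m.alt_error) < 5.0      # altitude error, metres
            assert abs(m.xtrack_error) < 10.0  # cross-track error, metres
            assert abs(m.aspd_error) < 3.0     # airspeed error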
''' def __init__(self, nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error): MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAV_CONTROLLER_OUTPUT, 'NAV_CONTROLLER_OUTPUT') self._fieldnames = ['nav_roll', 'nav_pitch', 'nav_bearing', 'target_bearing', 'wp_dist', 'alt_error', 'aspd_error', 'xtrack_error'] self.nav_roll = nav_roll self.nav_pitch = nav_pitch self.nav_bearing = nav_bearing self.target_bearing = target_bearing self.wp_dist = wp_dist self.alt_error = alt_error self.aspd_error = aspd_error self.xtrack_error = xtrack_error def pack(self, mav): return MAVLink_message.pack(self, mav, 183, struct.pack('<fffffhhH', self.nav_roll, self.nav_pitch, self.alt_error, self.aspd_error, self.xtrack_error, self.nav_bearing, self.target_bearing, self.wp_dist)) class MAVLink_set_quad_swarm_led_roll_pitch_yaw_thrust_message(MAVLink_message): ''' Setpoint for up to four quadrotors in a group / wing ''' def __init__(self, group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST, 'SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST') self._fieldnames = ['group', 'mode', 'led_red', 'led_blue', 'led_green', 'roll', 'pitch', 'yaw', 'thrust'] self.group = group self.mode = mode self.led_red = led_red self.led_blue = led_blue self.led_green = led_green self.roll = roll self.pitch = pitch self.yaw = yaw self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 130, struct.pack('<4h4h4h4HBB4s4s4s', self.roll, self.pitch, self.yaw, self.thrust, self.group, self.mode, self.led_red, self.led_blue, self.led_green)) class MAVLink_state_correction_message(MAVLink_message): ''' Corrects the systems state by adding an error correction term to the position and velocity, and by rotating the attitude by a correction angle. 
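    For example, a small position-only correction (illustrative sketch;
    `mav` is assumed to be a connected MAVLink instance):

        msg = MAVLink_state_correction_message(
            xErr=0.1, yErr=-0.05, zErr=0.0,
            rollErr=0.0, pitchErr=0.0, yawErr=0.0,
            vxErr=0.0, vyErr=0.0, vzErr=0.0)
        mav.send(msg)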
    '''
    def __init__(self, xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_STATE_CORRECTION, 'STATE_CORRECTION')
        self._fieldnames = ['xErr', 'yErr', 'zErr', 'rollErr', 'pitchErr', 'yawErr', 'vxErr', 'vyErr', 'vzErr']
        self.xErr = xErr
        self.yErr = yErr
        self.zErr = zErr
        self.rollErr = rollErr
        self.pitchErr = pitchErr
        self.yawErr = yawErr
        self.vxErr = vxErr
        self.vyErr = vyErr
        self.vzErr = vzErr

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 130, struct.pack('<fffffffff', self.xErr, self.yErr, self.zErr, self.rollErr, self.pitchErr, self.yawErr, self.vxErr, self.vyErr, self.vzErr))

class MAVLink_request_data_stream_message(MAVLink_message):
    '''

    '''
    def __init__(self, target_system, target_component, req_stream_id, req_message_rate, start_stop):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_REQUEST_DATA_STREAM, 'REQUEST_DATA_STREAM')
        self._fieldnames = ['target_system', 'target_component', 'req_stream_id', 'req_message_rate', 'start_stop']
        self.target_system = target_system
        self.target_component = target_component
        self.req_stream_id = req_stream_id
        self.req_message_rate = req_message_rate
        self.start_stop = start_stop

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 148, struct.pack('<HBBBB', self.req_message_rate, self.target_system, self.target_component, self.req_stream_id, self.start_stop))

class MAVLink_data_stream_message(MAVLink_message):
    '''

    '''
    def __init__(self, stream_id, message_rate, on_off):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_DATA_STREAM, 'DATA_STREAM')
        self._fieldnames = ['stream_id', 'message_rate', 'on_off']
        self.stream_id = stream_id
        self.message_rate = message_rate
        self.on_off = on_off

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 21, struct.pack('<HBB', self.message_rate, self.stream_id, self.on_off))

class MAVLink_manual_control_message(MAVLink_message):
    '''
    This message provides an API for manually controlling the vehicle
    using standard joystick axes nomenclature, along with a joystick-like
    input device. Unused axes can be disabled, and button states are
    transmitted as boolean values.
    '''
    def __init__(self, target, x, y, z, r, buttons):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MANUAL_CONTROL, 'MANUAL_CONTROL')
        self._fieldnames = ['target', 'x', 'y', 'z', 'r', 'buttons']
        self.target = target
        self.x = x
        self.y = y
        self.z = z
        self.r = r
        self.buttons = buttons

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 243, struct.pack('<hhhhHB', self.x, self.y, self.z, self.r, self.buttons, self.target))

class MAVLink_rc_channels_override_message(MAVLink_message):
    '''
    The RAW values of the RC channels sent to the MAV to override info
    received from the RC radio. A value of -1 means no change to that
    channel. A value of 0 means control of that channel should be released
    back to the RC radio. The standard PPM modulation is as follows:
    1000 microseconds: 0%, 2000 microseconds: 100%. Individual
    receivers/transmitters might violate this specification.
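    For example, to drive channel 1 to mid-stick while releasing the other
    channels (illustrative sketch using the generated
    rc_channels_override_send helper; 1500 microseconds is 50% under the
    PPM mapping above):

        mav.rc_channels_override_send(1, 1, 1500, 0, 0, 0, 0, 0, 0, 0)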
    '''
    def __init__(self, target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_RC_CHANNELS_OVERRIDE, 'RC_CHANNELS_OVERRIDE')
        self._fieldnames = ['target_system', 'target_component', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw']
        self.target_system = target_system
        self.target_component = target_component
        self.chan1_raw = chan1_raw
        self.chan2_raw = chan2_raw
        self.chan3_raw = chan3_raw
        self.chan4_raw = chan4_raw
        self.chan5_raw = chan5_raw
        self.chan6_raw = chan6_raw
        self.chan7_raw = chan7_raw
        self.chan8_raw = chan8_raw

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 124, struct.pack('<HHHHHHHHBB', self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.target_system, self.target_component))

class MAVLink_vfr_hud_message(MAVLink_message):
    '''
    Metrics typically displayed on a HUD for fixed wing aircraft
    '''
    def __init__(self, airspeed, groundspeed, heading, throttle, alt, climb):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_VFR_HUD, 'VFR_HUD')
        self._fieldnames = ['airspeed', 'groundspeed', 'heading', 'throttle', 'alt', 'climb']
        self.airspeed = airspeed
        self.groundspeed = groundspeed
        self.heading = heading
        self.throttle = throttle
        self.alt = alt
        self.climb = climb

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 20, struct.pack('<ffffhH', self.airspeed, self.groundspeed, self.alt, self.climb, self.heading, self.throttle))

class MAVLink_command_long_message(MAVLink_message):
    '''
    Send a command with up to seven parameters to the MAV
    '''
    def __init__(self, target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_COMMAND_LONG, 'COMMAND_LONG')
        self._fieldnames = ['target_system', 'target_component', 'command', 'confirmation', 'param1', 'param2', 'param3', 'param4', 'param5', 'param6', 'param7']
        self.target_system = target_system
        self.target_component = target_component
        self.command = command
        self.confirmation = confirmation
        self.param1 = param1
        self.param2 = param2
        self.param3 = param3
        self.param4 = param4
        self.param5 = param5
        self.param6 = param6
        self.param7 = param7

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 152, struct.pack('<fffffffHBBB', self.param1, self.param2, self.param3, self.param4, self.param5, self.param6, self.param7, self.command, self.target_system, self.target_component, self.confirmation))

class MAVLink_command_ack_message(MAVLink_message):
    '''
    Report status of a command. Includes feedback whether the command was
    executed.
    '''
    def __init__(self, command, result):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_COMMAND_ACK, 'COMMAND_ACK')
        self._fieldnames = ['command', 'result']
        self.command = command
        self.result = result

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 143, struct.pack('<HB', self.command, self.result))

class MAVLink_roll_pitch_yaw_rates_thrust_setpoint_message(MAVLink_message):
    '''
    Setpoint in roll, pitch, yaw rates and thrust currently active on the
    system.
''' def __init__(self, time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust): MAVLink_message.__init__(self, MAVLINK_MSG_ID_ROLL_PITCH_YAW_RATES_THRUST_SETPOINT, 'ROLL_PITCH_YAW_RATES_THRUST_SETPOINT') self._fieldnames = ['time_boot_ms', 'roll_rate', 'pitch_rate', 'yaw_rate', 'thrust'] self.time_boot_ms = time_boot_ms self.roll_rate = roll_rate self.pitch_rate = pitch_rate self.yaw_rate = yaw_rate self.thrust = thrust def pack(self, mav): return MAVLink_message.pack(self, mav, 127, struct.pack('<Iffff', self.time_boot_ms, self.roll_rate, self.pitch_rate, self.yaw_rate, self.thrust)) class MAVLink_manual_setpoint_message(MAVLink_message): ''' Setpoint in roll, pitch, yaw and thrust from the operator ''' def __init__(self, time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch): MAVLink_message.__init__(self, MAVLINK_MSG_ID_MANUAL_SETPOINT, 'MANUAL_SETPOINT') self._fieldnames = ['time_boot_ms', 'roll', 'pitch', 'yaw', 'thrust', 'mode_switch', 'manual_override_switch'] self.time_boot_ms = time_boot_ms self.roll = roll self.pitch = pitch self.yaw = yaw self.thrust = thrust self.mode_switch = mode_switch self.manual_override_switch = manual_override_switch def pack(self, mav): return MAVLink_message.pack(self, mav, 106, struct.pack('<IffffBB', self.time_boot_ms, self.roll, self.pitch, self.yaw, self.thrust, self.mode_switch, self.manual_override_switch)) class MAVLink_local_position_ned_system_global_offset_message(MAVLink_message): ''' The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages of MAV X and the global coordinate frame in NED coordinates. Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) ''' def __init__(self, time_boot_ms, x, y, z, roll, pitch, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET, 'LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET') self._fieldnames = ['time_boot_ms', 'x', 'y', 'z', 'roll', 'pitch', 'yaw'] self.time_boot_ms = time_boot_ms self.x = x self.y = y self.z = z self.roll = roll self.pitch = pitch self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 231, struct.pack('<Iffffff', self.time_boot_ms, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) class MAVLink_hil_state_message(MAVLink_message): ''' Sent from simulation to autopilot. This packet is useful for high throughput applications such as hardware in the loop simulations. ''' def __init__(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc): MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_STATE, 'HIL_STATE') self._fieldnames = ['time_usec', 'roll', 'pitch', 'yaw', 'rollspeed', 'pitchspeed', 'yawspeed', 'lat', 'lon', 'alt', 'vx', 'vy', 'vz', 'xacc', 'yacc', 'zacc'] self.time_usec = time_usec self.roll = roll self.pitch = pitch self.yaw = yaw self.rollspeed = rollspeed self.pitchspeed = pitchspeed self.yawspeed = yawspeed self.lat = lat self.lon = lon self.alt = alt self.vx = vx self.vy = vy self.vz = vz self.xacc = xacc self.yacc = yacc self.zacc = zacc def pack(self, mav): return MAVLink_message.pack(self, mav, 183, struct.pack('<Qffffffiiihhhhhh', self.time_usec, self.roll, self.pitch, self.yaw, self.rollspeed, self.pitchspeed, self.yawspeed, self.lat, self.lon, self.alt, self.vx, self.vy, self.vz, self.xacc, self.yacc, self.zacc)) class MAVLink_hil_controls_message(MAVLink_message): ''' Sent from autopilot to simulation. 
Hardware in the loop control outputs ''' def __init__(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode): MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_CONTROLS, 'HIL_CONTROLS') self._fieldnames = ['time_usec', 'roll_ailerons', 'pitch_elevator', 'yaw_rudder', 'throttle', 'aux1', 'aux2', 'aux3', 'aux4', 'mode', 'nav_mode'] self.time_usec = time_usec self.roll_ailerons = roll_ailerons self.pitch_elevator = pitch_elevator self.yaw_rudder = yaw_rudder self.throttle = throttle self.aux1 = aux1 self.aux2 = aux2 self.aux3 = aux3 self.aux4 = aux4 self.mode = mode self.nav_mode = nav_mode def pack(self, mav): return MAVLink_message.pack(self, mav, 63, struct.pack('<QffffffffBB', self.time_usec, self.roll_ailerons, self.pitch_elevator, self.yaw_rudder, self.throttle, self.aux1, self.aux2, self.aux3, self.aux4, self.mode, self.nav_mode)) class MAVLink_hil_rc_inputs_raw_message(MAVLink_message): ''' Sent from simulation to autopilot. The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. ''' def __init__(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi): MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW, 'HIL_RC_INPUTS_RAW') self._fieldnames = ['time_usec', 'chan1_raw', 'chan2_raw', 'chan3_raw', 'chan4_raw', 'chan5_raw', 'chan6_raw', 'chan7_raw', 'chan8_raw', 'chan9_raw', 'chan10_raw', 'chan11_raw', 'chan12_raw', 'rssi'] self.time_usec = time_usec self.chan1_raw = chan1_raw self.chan2_raw = chan2_raw self.chan3_raw = chan3_raw self.chan4_raw = chan4_raw self.chan5_raw = chan5_raw self.chan6_raw = chan6_raw self.chan7_raw = chan7_raw self.chan8_raw = chan8_raw self.chan9_raw = chan9_raw self.chan10_raw = chan10_raw self.chan11_raw = chan11_raw self.chan12_raw = chan12_raw self.rssi = rssi def pack(self, mav): return MAVLink_message.pack(self, mav, 54, struct.pack('<QHHHHHHHHHHHHB', self.time_usec, self.chan1_raw, self.chan2_raw, self.chan3_raw, self.chan4_raw, self.chan5_raw, self.chan6_raw, self.chan7_raw, self.chan8_raw, self.chan9_raw, self.chan10_raw, self.chan11_raw, self.chan12_raw, self.rssi)) class MAVLink_optical_flow_message(MAVLink_message): ''' Optical flow from a flow sensor (e.g. 
optical mouse sensor) ''' def __init__(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance): MAVLink_message.__init__(self, MAVLINK_MSG_ID_OPTICAL_FLOW, 'OPTICAL_FLOW') self._fieldnames = ['time_usec', 'sensor_id', 'flow_x', 'flow_y', 'flow_comp_m_x', 'flow_comp_m_y', 'quality', 'ground_distance'] self.time_usec = time_usec self.sensor_id = sensor_id self.flow_x = flow_x self.flow_y = flow_y self.flow_comp_m_x = flow_comp_m_x self.flow_comp_m_y = flow_comp_m_y self.quality = quality self.ground_distance = ground_distance def pack(self, mav): return MAVLink_message.pack(self, mav, 175, struct.pack('<QfffhhBB', self.time_usec, self.flow_comp_m_x, self.flow_comp_m_y, self.ground_distance, self.flow_x, self.flow_y, self.sensor_id, self.quality)) class MAVLink_global_vision_position_estimate_message(MAVLink_message): ''' ''' def __init__(self, usec, x, y, z, roll, pitch, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE, 'GLOBAL_VISION_POSITION_ESTIMATE') self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw'] self.usec = usec self.x = x self.y = y self.z = z self.roll = roll self.pitch = pitch self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 102, struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) class MAVLink_vision_position_estimate_message(MAVLink_message): ''' ''' def __init__(self, usec, x, y, z, roll, pitch, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE, 'VISION_POSITION_ESTIMATE') self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw'] self.usec = usec self.x = x self.y = y self.z = z self.roll = roll self.pitch = pitch self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 158, struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) class MAVLink_vision_speed_estimate_message(MAVLink_message): ''' ''' def __init__(self, usec, x, y, z): MAVLink_message.__init__(self, MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE, 'VISION_SPEED_ESTIMATE') self._fieldnames = ['usec', 'x', 'y', 'z'] self.usec = usec self.x = x self.y = y self.z = z def pack(self, mav): return MAVLink_message.pack(self, mav, 208, struct.pack('<Qfff', self.usec, self.x, self.y, self.z)) class MAVLink_vicon_position_estimate_message(MAVLink_message): ''' ''' def __init__(self, usec, x, y, z, roll, pitch, yaw): MAVLink_message.__init__(self, MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE, 'VICON_POSITION_ESTIMATE') self._fieldnames = ['usec', 'x', 'y', 'z', 'roll', 'pitch', 'yaw'] self.usec = usec self.x = x self.y = y self.z = z self.roll = roll self.pitch = pitch self.yaw = yaw def pack(self, mav): return MAVLink_message.pack(self, mav, 56, struct.pack('<Qffffff', self.usec, self.x, self.y, self.z, self.roll, self.pitch, self.yaw)) class MAVLink_highres_imu_message(MAVLink_message): ''' The IMU readings in SI units in NED body frame ''' def __init__(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated): MAVLink_message.__init__(self, MAVLINK_MSG_ID_HIGHRES_IMU, 'HIGHRES_IMU') self._fieldnames = ['time_usec', 'xacc', 'yacc', 'zacc', 'xgyro', 'ygyro', 'zgyro', 'xmag', 'ymag', 'zmag', 'abs_pressure', 'diff_pressure', 'pressure_alt', 'temperature', 'fields_updated'] self.time_usec = time_usec self.xacc = xacc self.yacc = yacc self.zacc = zacc self.xgyro = xgyro self.ygyro = ygyro self.zgyro = zgyro 
        self.xmag = xmag
        self.ymag = ymag
        self.zmag = zmag
        self.abs_pressure = abs_pressure
        self.diff_pressure = diff_pressure
        self.pressure_alt = pressure_alt
        self.temperature = temperature
        self.fields_updated = fields_updated

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 93, struct.pack('<QfffffffffffffH', self.time_usec, self.xacc, self.yacc, self.zacc, self.xgyro, self.ygyro, self.zgyro, self.xmag, self.ymag, self.zmag, self.abs_pressure, self.diff_pressure, self.pressure_alt, self.temperature, self.fields_updated))

class MAVLink_file_transfer_start_message(MAVLink_message):
    '''
    Begin file transfer
    '''
    def __init__(self, transfer_uid, dest_path, direction, file_size, flags):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_START, 'FILE_TRANSFER_START')
        self._fieldnames = ['transfer_uid', 'dest_path', 'direction', 'file_size', 'flags']
        self.transfer_uid = transfer_uid
        self.dest_path = dest_path
        self.direction = direction
        self.file_size = file_size
        self.flags = flags

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 235, struct.pack('<QI240sBB', self.transfer_uid, self.file_size, self.dest_path, self.direction, self.flags))

class MAVLink_file_transfer_dir_list_message(MAVLink_message):
    '''
    Get directory listing
    '''
    def __init__(self, transfer_uid, dir_path, flags):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST, 'FILE_TRANSFER_DIR_LIST')
        self._fieldnames = ['transfer_uid', 'dir_path', 'flags']
        self.transfer_uid = transfer_uid
        self.dir_path = dir_path
        self.flags = flags

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 93, struct.pack('<Q240sB', self.transfer_uid, self.dir_path, self.flags))

class MAVLink_file_transfer_res_message(MAVLink_message):
    '''
    File transfer result
    '''
    def __init__(self, transfer_uid, result):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_FILE_TRANSFER_RES, 'FILE_TRANSFER_RES')
        self._fieldnames = ['transfer_uid', 'result']
        self.transfer_uid = transfer_uid
        self.result = result

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 124, struct.pack('<QB', self.transfer_uid, self.result))

class MAVLink_battery_status_message(MAVLink_message):
    '''
    Transmit battery information for a battery pack.
    '''
    def __init__(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_BATTERY_STATUS, 'BATTERY_STATUS')
        self._fieldnames = ['accu_id', 'voltage_cell_1', 'voltage_cell_2', 'voltage_cell_3', 'voltage_cell_4', 'voltage_cell_5', 'voltage_cell_6', 'current_battery', 'battery_remaining']
        self.accu_id = accu_id
        self.voltage_cell_1 = voltage_cell_1
        self.voltage_cell_2 = voltage_cell_2
        self.voltage_cell_3 = voltage_cell_3
        self.voltage_cell_4 = voltage_cell_4
        self.voltage_cell_5 = voltage_cell_5
        self.voltage_cell_6 = voltage_cell_6
        self.current_battery = current_battery
        self.battery_remaining = battery_remaining

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 42, struct.pack('<HHHHHHhBb', self.voltage_cell_1, self.voltage_cell_2, self.voltage_cell_3, self.voltage_cell_4, self.voltage_cell_5, self.voltage_cell_6, self.current_battery, self.accu_id, self.battery_remaining))

class MAVLink_setpoint_8dof_message(MAVLink_message):
    '''
    Set the 8 DOF setpoint for a controller.
    '''
    def __init__(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SETPOINT_8DOF, 'SETPOINT_8DOF')
        self._fieldnames = ['target_system', 'val1', 'val2', 'val3', 'val4', 'val5', 'val6', 'val7', 'val8']
        self.target_system = target_system
        self.val1 = val1
        self.val2 = val2
        self.val3 = val3
        self.val4 = val4
        self.val5 = val5
        self.val6 = val6
        self.val7 = val7
        self.val8 = val8

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 241, struct.pack('<ffffffffB', self.val1, self.val2, self.val3, self.val4, self.val5, self.val6, self.val7, self.val8, self.target_system))

class MAVLink_setpoint_6dof_message(MAVLink_message):
    '''
    Set the 6 DOF setpoint for an attitude and position controller.
    '''
    def __init__(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_SETPOINT_6DOF, 'SETPOINT_6DOF')
        self._fieldnames = ['target_system', 'trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z']
        self.target_system = target_system
        self.trans_x = trans_x
        self.trans_y = trans_y
        self.trans_z = trans_z
        self.rot_x = rot_x
        self.rot_y = rot_y
        self.rot_z = rot_z

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 15, struct.pack('<ffffffB', self.trans_x, self.trans_y, self.trans_z, self.rot_x, self.rot_y, self.rot_z, self.target_system))

class MAVLink_memory_vect_message(MAVLink_message):
    '''
    Send raw controller memory. The use of this message is discouraged for
    normal packets, but it is quite an efficient way of testing new
    messages and getting experimental debug output.
    '''
    def __init__(self, address, ver, type, value):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_MEMORY_VECT, 'MEMORY_VECT')
        self._fieldnames = ['address', 'ver', 'type', 'value']
        self.address = address
        self.ver = ver
        self.type = type
        self.value = value

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 204, struct.pack('<HBB32s', self.address, self.ver, self.type, self.value))

class MAVLink_debug_vect_message(MAVLink_message):
    '''

    '''
    def __init__(self, name, time_usec, x, y, z):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_DEBUG_VECT, 'DEBUG_VECT')
        self._fieldnames = ['name', 'time_usec', 'x', 'y', 'z']
        self.name = name
        self.time_usec = time_usec
        self.x = x
        self.y = y
        self.z = z

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 49, struct.pack('<Qfff10s', self.time_usec, self.x, self.y, self.z, self.name))

class MAVLink_named_value_float_message(MAVLink_message):
    '''
    Send a key-value pair as float. The use of this message is discouraged
    for normal packets, but it is quite an efficient way of testing new
    messages and getting experimental debug output.
    '''
    def __init__(self, time_boot_ms, name, value):
        MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAMED_VALUE_FLOAT, 'NAMED_VALUE_FLOAT')
        self._fieldnames = ['time_boot_ms', 'name', 'value']
        self.time_boot_ms = time_boot_ms
        self.name = name
        self.value = value

    def pack(self, mav):
        return MAVLink_message.pack(self, mav, 170, struct.pack('<If10s', self.time_boot_ms, self.value, self.name))

class MAVLink_named_value_int_message(MAVLink_message):
    '''
    Send a key-value pair as integer. The use of this message is
    discouraged for normal packets, but it is quite an efficient way of
    testing new messages and getting experimental debug output.
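    For example (illustrative sketch using the generated
    named_value_int_send helper; boot_ms() is a placeholder for your
    boot-relative millisecond clock, and the name must fit in 10 chars):

        mav.named_value_int_send(boot_ms(), 'LOOP_CNT', 42)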
''' def __init__(self, time_boot_ms, name, value): MAVLink_message.__init__(self, MAVLINK_MSG_ID_NAMED_VALUE_INT, 'NAMED_VALUE_INT') self._fieldnames = ['time_boot_ms', 'name', 'value'] self.time_boot_ms = time_boot_ms self.name = name self.value = value def pack(self, mav): return MAVLink_message.pack(self, mav, 44, struct.pack('<Ii10s', self.time_boot_ms, self.value, self.name)) class MAVLink_statustext_message(MAVLink_message): ''' Status text message. These messages are printed in yellow in the COMM console of QGroundControl. WARNING: They consume quite some bandwidth, so use only for important status and error messages. If implemented wisely, these messages are buffered on the MCU and sent only at a limited rate (e.g. 10 Hz). ''' def __init__(self, severity, text): MAVLink_message.__init__(self, MAVLINK_MSG_ID_STATUSTEXT, 'STATUSTEXT') self._fieldnames = ['severity', 'text'] self.severity = severity self.text = text def pack(self, mav): return MAVLink_message.pack(self, mav, 83, struct.pack('<B50s', self.severity, self.text)) class MAVLink_debug_message(MAVLink_message): ''' Send a debug value. The index is used to discriminate between values. These values show up in the plot of QGroundControl as DEBUG N. ''' def __init__(self, time_boot_ms, ind, value): MAVLink_message.__init__(self, MAVLINK_MSG_ID_DEBUG, 'DEBUG') self._fieldnames = ['time_boot_ms', 'ind', 'value'] self.time_boot_ms = time_boot_ms self.ind = ind self.value = value def pack(self, mav): return MAVLink_message.pack(self, mav, 46, struct.pack('<IfB', self.time_boot_ms, self.value, self.ind)) mavlink_map = { MAVLINK_MSG_ID_HEARTBEAT : ( '<IBBBBB', MAVLink_heartbeat_message, [1, 2, 3, 0, 4, 5], 50 ), MAVLINK_MSG_ID_SYS_STATUS : ( '<IIIHHhHHHHHHb', MAVLink_sys_status_message, [0, 1, 2, 3, 4, 5, 12, 6, 7, 8, 9, 10, 11], 124 ), MAVLINK_MSG_ID_SYSTEM_TIME : ( '<QI', MAVLink_system_time_message, [0, 1], 137 ), MAVLINK_MSG_ID_PING : ( '<QIBB', MAVLink_ping_message, [0, 1, 2, 3], 237 ), MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL : ( '<BBB25s', MAVLink_change_operator_control_message, [0, 1, 2, 3], 217 ), MAVLINK_MSG_ID_CHANGE_OPERATOR_CONTROL_ACK : ( '<BBB', MAVLink_change_operator_control_ack_message, [0, 1, 2], 104 ), MAVLINK_MSG_ID_AUTH_KEY : ( '<32s', MAVLink_auth_key_message, [0], 119 ), MAVLINK_MSG_ID_SET_MODE : ( '<IBB', MAVLink_set_mode_message, [1, 2, 0], 89 ), MAVLINK_MSG_ID_PARAM_REQUEST_READ : ( '<hBB16s', MAVLink_param_request_read_message, [1, 2, 3, 0], 214 ), MAVLINK_MSG_ID_PARAM_REQUEST_LIST : ( '<BB', MAVLink_param_request_list_message, [0, 1], 159 ), MAVLINK_MSG_ID_PARAM_VALUE : ( '<fHH16sB', MAVLink_param_value_message, [3, 0, 4, 1, 2], 220 ), MAVLINK_MSG_ID_PARAM_SET : ( '<fBB16sB', MAVLink_param_set_message, [1, 2, 3, 0, 4], 168 ), MAVLINK_MSG_ID_GPS_RAW_INT : ( '<QiiiHHHHBB', MAVLink_gps_raw_int_message, [0, 8, 1, 2, 3, 4, 5, 6, 7, 9], 24 ), MAVLINK_MSG_ID_GPS_STATUS : ( '<B20s20s20s20s20s', MAVLink_gps_status_message, [0, 1, 2, 3, 4, 5], 23 ), MAVLINK_MSG_ID_SCALED_IMU : ( '<Ihhhhhhhhh', MAVLink_scaled_imu_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 170 ), MAVLINK_MSG_ID_RAW_IMU : ( '<Qhhhhhhhhh', MAVLink_raw_imu_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 144 ), MAVLINK_MSG_ID_RAW_PRESSURE : ( '<Qhhhh', MAVLink_raw_pressure_message, [0, 1, 2, 3, 4], 67 ), MAVLINK_MSG_ID_SCALED_PRESSURE : ( '<Iffh', MAVLink_scaled_pressure_message, [0, 1, 2, 3], 115 ), MAVLINK_MSG_ID_ATTITUDE : ( '<Iffffff', MAVLink_attitude_message, [0, 1, 2, 3, 4, 5, 6], 39 ), MAVLINK_MSG_ID_ATTITUDE_QUATERNION : ( '<Ifffffff', 
MAVLink_attitude_quaternion_message, [0, 1, 2, 3, 4, 5, 6, 7], 246 ), MAVLINK_MSG_ID_LOCAL_POSITION_NED : ( '<Iffffff', MAVLink_local_position_ned_message, [0, 1, 2, 3, 4, 5, 6], 185 ), MAVLINK_MSG_ID_GLOBAL_POSITION_INT : ( '<IiiiihhhH', MAVLink_global_position_int_message, [0, 1, 2, 3, 4, 5, 6, 7, 8], 104 ), MAVLINK_MSG_ID_RC_CHANNELS_SCALED : ( '<IhhhhhhhhBB', MAVLink_rc_channels_scaled_message, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8, 10], 237 ), MAVLINK_MSG_ID_RC_CHANNELS_RAW : ( '<IHHHHHHHHBB', MAVLink_rc_channels_raw_message, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8, 10], 244 ), MAVLINK_MSG_ID_SERVO_OUTPUT_RAW : ( '<IHHHHHHHHB', MAVLink_servo_output_raw_message, [0, 9, 1, 2, 3, 4, 5, 6, 7, 8], 242 ), MAVLINK_MSG_ID_MISSION_REQUEST_PARTIAL_LIST : ( '<hhBB', MAVLink_mission_request_partial_list_message, [2, 3, 0, 1], 212 ), MAVLINK_MSG_ID_MISSION_WRITE_PARTIAL_LIST : ( '<hhBB', MAVLink_mission_write_partial_list_message, [2, 3, 0, 1], 9 ), MAVLINK_MSG_ID_MISSION_ITEM : ( '<fffffffHHBBBBB', MAVLink_mission_item_message, [9, 10, 7, 11, 8, 12, 13, 0, 1, 2, 3, 4, 5, 6], 254 ), MAVLINK_MSG_ID_MISSION_REQUEST : ( '<HBB', MAVLink_mission_request_message, [1, 2, 0], 230 ), MAVLINK_MSG_ID_MISSION_SET_CURRENT : ( '<HBB', MAVLink_mission_set_current_message, [1, 2, 0], 28 ), MAVLINK_MSG_ID_MISSION_CURRENT : ( '<H', MAVLink_mission_current_message, [0], 28 ), MAVLINK_MSG_ID_MISSION_REQUEST_LIST : ( '<BB', MAVLink_mission_request_list_message, [0, 1], 132 ), MAVLINK_MSG_ID_MISSION_COUNT : ( '<HBB', MAVLink_mission_count_message, [1, 2, 0], 221 ), MAVLINK_MSG_ID_MISSION_CLEAR_ALL : ( '<BB', MAVLink_mission_clear_all_message, [0, 1], 232 ), MAVLINK_MSG_ID_MISSION_ITEM_REACHED : ( '<H', MAVLink_mission_item_reached_message, [0], 11 ), MAVLINK_MSG_ID_MISSION_ACK : ( '<BBB', MAVLink_mission_ack_message, [0, 1, 2], 153 ), MAVLINK_MSG_ID_SET_GPS_GLOBAL_ORIGIN : ( '<iiiB', MAVLink_set_gps_global_origin_message, [3, 0, 1, 2], 41 ), MAVLINK_MSG_ID_GPS_GLOBAL_ORIGIN : ( '<iii', MAVLink_gps_global_origin_message, [0, 1, 2], 39 ), MAVLINK_MSG_ID_SET_LOCAL_POSITION_SETPOINT : ( '<ffffBBB', MAVLink_set_local_position_setpoint_message, [4, 5, 6, 0, 1, 2, 3], 214 ), MAVLINK_MSG_ID_LOCAL_POSITION_SETPOINT : ( '<ffffB', MAVLink_local_position_setpoint_message, [4, 0, 1, 2, 3], 223 ), MAVLINK_MSG_ID_GLOBAL_POSITION_SETPOINT_INT : ( '<iiihB', MAVLink_global_position_setpoint_int_message, [4, 0, 1, 2, 3], 141 ), MAVLINK_MSG_ID_SET_GLOBAL_POSITION_SETPOINT_INT : ( '<iiihB', MAVLink_set_global_position_setpoint_int_message, [4, 0, 1, 2, 3], 33 ), MAVLINK_MSG_ID_SAFETY_SET_ALLOWED_AREA : ( '<ffffffBBB', MAVLink_safety_set_allowed_area_message, [6, 7, 8, 0, 1, 2, 3, 4, 5], 15 ), MAVLINK_MSG_ID_SAFETY_ALLOWED_AREA : ( '<ffffffB', MAVLink_safety_allowed_area_message, [6, 0, 1, 2, 3, 4, 5], 3 ), MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_THRUST : ( '<ffffBB', MAVLink_set_roll_pitch_yaw_thrust_message, [4, 5, 0, 1, 2, 3], 100 ), MAVLINK_MSG_ID_SET_ROLL_PITCH_YAW_SPEED_THRUST : ( '<ffffBB', MAVLink_set_roll_pitch_yaw_speed_thrust_message, [4, 5, 0, 1, 2, 3], 24 ), MAVLINK_MSG_ID_ROLL_PITCH_YAW_THRUST_SETPOINT : ( '<Iffff', MAVLink_roll_pitch_yaw_thrust_setpoint_message, [0, 1, 2, 3, 4], 239 ), MAVLINK_MSG_ID_ROLL_PITCH_YAW_SPEED_THRUST_SETPOINT : ( '<Iffff', MAVLink_roll_pitch_yaw_speed_thrust_setpoint_message, [0, 1, 2, 3, 4], 238 ), MAVLINK_MSG_ID_SET_QUAD_MOTORS_SETPOINT : ( '<HHHHB', MAVLink_set_quad_motors_setpoint_message, [4, 0, 1, 2, 3], 30 ), MAVLINK_MSG_ID_SET_QUAD_SWARM_ROLL_PITCH_YAW_THRUST : ( '<4h4h4h4HBB', 
MAVLink_set_quad_swarm_roll_pitch_yaw_thrust_message, [4, 5, 0, 1, 2, 3], 240 ), MAVLINK_MSG_ID_NAV_CONTROLLER_OUTPUT : ( '<fffffhhH', MAVLink_nav_controller_output_message, [0, 1, 5, 6, 7, 2, 3, 4], 183 ), MAVLINK_MSG_ID_SET_QUAD_SWARM_LED_ROLL_PITCH_YAW_THRUST : ( '<4h4h4h4HBB4s4s4s', MAVLink_set_quad_swarm_led_roll_pitch_yaw_thrust_message, [4, 5, 6, 7, 8, 0, 1, 2, 3], 130 ), MAVLINK_MSG_ID_STATE_CORRECTION : ( '<fffffffff', MAVLink_state_correction_message, [0, 1, 2, 3, 4, 5, 6, 7, 8], 130 ), MAVLINK_MSG_ID_REQUEST_DATA_STREAM : ( '<HBBBB', MAVLink_request_data_stream_message, [1, 2, 3, 0, 4], 148 ), MAVLINK_MSG_ID_DATA_STREAM : ( '<HBB', MAVLink_data_stream_message, [1, 0, 2], 21 ), MAVLINK_MSG_ID_MANUAL_CONTROL : ( '<hhhhHB', MAVLink_manual_control_message, [5, 0, 1, 2, 3, 4], 243 ), MAVLINK_MSG_ID_RC_CHANNELS_OVERRIDE : ( '<HHHHHHHHBB', MAVLink_rc_channels_override_message, [8, 9, 0, 1, 2, 3, 4, 5, 6, 7], 124 ), MAVLINK_MSG_ID_VFR_HUD : ( '<ffffhH', MAVLink_vfr_hud_message, [0, 1, 4, 5, 2, 3], 20 ), MAVLINK_MSG_ID_COMMAND_LONG : ( '<fffffffHBBB', MAVLink_command_long_message, [8, 9, 7, 10, 0, 1, 2, 3, 4, 5, 6], 152 ), MAVLINK_MSG_ID_COMMAND_ACK : ( '<HB', MAVLink_command_ack_message, [0, 1], 143 ), MAVLINK_MSG_ID_ROLL_PITCH_YAW_RATES_THRUST_SETPOINT : ( '<Iffff', MAVLink_roll_pitch_yaw_rates_thrust_setpoint_message, [0, 1, 2, 3, 4], 127 ), MAVLINK_MSG_ID_MANUAL_SETPOINT : ( '<IffffBB', MAVLink_manual_setpoint_message, [0, 1, 2, 3, 4, 5, 6], 106 ), MAVLINK_MSG_ID_LOCAL_POSITION_NED_SYSTEM_GLOBAL_OFFSET : ( '<Iffffff', MAVLink_local_position_ned_system_global_offset_message, [0, 1, 2, 3, 4, 5, 6], 231 ), MAVLINK_MSG_ID_HIL_STATE : ( '<Qffffffiiihhhhhh', MAVLink_hil_state_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 183 ), MAVLINK_MSG_ID_HIL_CONTROLS : ( '<QffffffffBB', MAVLink_hil_controls_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 63 ), MAVLINK_MSG_ID_HIL_RC_INPUTS_RAW : ( '<QHHHHHHHHHHHHB', MAVLink_hil_rc_inputs_raw_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 54 ), MAVLINK_MSG_ID_OPTICAL_FLOW : ( '<QfffhhBB', MAVLink_optical_flow_message, [0, 6, 4, 5, 1, 2, 7, 3], 175 ), MAVLINK_MSG_ID_GLOBAL_VISION_POSITION_ESTIMATE : ( '<Qffffff', MAVLink_global_vision_position_estimate_message, [0, 1, 2, 3, 4, 5, 6], 102 ), MAVLINK_MSG_ID_VISION_POSITION_ESTIMATE : ( '<Qffffff', MAVLink_vision_position_estimate_message, [0, 1, 2, 3, 4, 5, 6], 158 ), MAVLINK_MSG_ID_VISION_SPEED_ESTIMATE : ( '<Qfff', MAVLink_vision_speed_estimate_message, [0, 1, 2, 3], 208 ), MAVLINK_MSG_ID_VICON_POSITION_ESTIMATE : ( '<Qffffff', MAVLink_vicon_position_estimate_message, [0, 1, 2, 3, 4, 5, 6], 56 ), MAVLINK_MSG_ID_HIGHRES_IMU : ( '<QfffffffffffffH', MAVLink_highres_imu_message, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], 93 ), MAVLINK_MSG_ID_FILE_TRANSFER_START : ( '<QI240sBB', MAVLink_file_transfer_start_message, [0, 2, 3, 1, 4], 235 ), MAVLINK_MSG_ID_FILE_TRANSFER_DIR_LIST : ( '<Q240sB', MAVLink_file_transfer_dir_list_message, [0, 1, 2], 93 ), MAVLINK_MSG_ID_FILE_TRANSFER_RES : ( '<QB', MAVLink_file_transfer_res_message, [0, 1], 124 ), MAVLINK_MSG_ID_BATTERY_STATUS : ( '<HHHHHHhBb', MAVLink_battery_status_message, [7, 0, 1, 2, 3, 4, 5, 6, 8], 42 ), MAVLINK_MSG_ID_SETPOINT_8DOF : ( '<ffffffffB', MAVLink_setpoint_8dof_message, [8, 0, 1, 2, 3, 4, 5, 6, 7], 241 ), MAVLINK_MSG_ID_SETPOINT_6DOF : ( '<ffffffB', MAVLink_setpoint_6dof_message, [6, 0, 1, 2, 3, 4, 5], 15 ), MAVLINK_MSG_ID_MEMORY_VECT : ( '<HBB32s', MAVLink_memory_vect_message, [0, 1, 2, 3], 204 ), 
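          # Each entry above and below follows the same layout:
          # (struct format string, message class, map from wire-field order
          #  to constructor argument order, CRC_EXTRA byte that decode()
          #  below folds into the x25 checksum).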
MAVLINK_MSG_ID_DEBUG_VECT : ( '<Qfff10s', MAVLink_debug_vect_message, [4, 0, 1, 2, 3], 49 ), MAVLINK_MSG_ID_NAMED_VALUE_FLOAT : ( '<If10s', MAVLink_named_value_float_message, [0, 2, 1], 170 ), MAVLINK_MSG_ID_NAMED_VALUE_INT : ( '<Ii10s', MAVLink_named_value_int_message, [0, 2, 1], 44 ), MAVLINK_MSG_ID_STATUSTEXT : ( '<B50s', MAVLink_statustext_message, [0, 1], 83 ), MAVLINK_MSG_ID_DEBUG : ( '<IfB', MAVLink_debug_message, [0, 2, 1], 46 ), } class MAVError(Exception): '''MAVLink error class''' def __init__(self, msg): Exception.__init__(self, msg) self.message = msg class MAVString(str): '''NUL terminated string''' def __init__(self, s): str.__init__(self) def __str__(self): i = self.find(chr(0)) if i == -1: return self[:] return self[0:i] class MAVLink_bad_data(MAVLink_message): ''' a piece of bad data in a mavlink stream ''' def __init__(self, data, reason): MAVLink_message.__init__(self, MAVLINK_MSG_ID_BAD_DATA, 'BAD_DATA') self._fieldnames = ['data', 'reason'] self.data = data self.reason = reason self._msgbuf = data class MAVLink(object): '''MAVLink protocol handling class''' def __init__(self, file, srcSystem=0, srcComponent=0): self.seq = 0 self.file = file self.srcSystem = srcSystem self.srcComponent = srcComponent self.callback = None self.callback_args = None self.callback_kwargs = None self.buf = array.array('B') self.expected_length = 6 self.have_prefix_error = False self.robust_parsing = False self.protocol_marker = 254 self.little_endian = True self.crc_extra = True self.sort_fields = True self.total_packets_sent = 0 self.total_bytes_sent = 0 self.total_packets_received = 0 self.total_bytes_received = 0 self.total_receive_errors = 0 self.startup_time = time.time() def set_callback(self, callback, *args, **kwargs): self.callback = callback self.callback_args = args self.callback_kwargs = kwargs def send(self, mavmsg): '''send a MAVLink message''' buf = mavmsg.pack(self) self.file.write(buf) self.seq = (self.seq + 1) % 255 self.total_packets_sent += 1 self.total_bytes_sent += len(buf) def bytes_needed(self): '''return number of bytes needed for next parsing stage''' ret = self.expected_length - len(self.buf) if ret <= 0: return 1 return ret def parse_char(self, c): '''input some data bytes, possibly returning a new message''' if isinstance(c, str): self.buf.fromstring(c) else: self.buf.extend(c) self.total_bytes_received += len(c) if len(self.buf) >= 1 and self.buf[0] != 254: magic = self.buf[0] self.buf = self.buf[1:] if self.robust_parsing: m = MAVLink_bad_data(chr(magic), "Bad prefix") if self.callback: self.callback(m, *self.callback_args, **self.callback_kwargs) self.expected_length = 6 self.total_receive_errors += 1 return m if self.have_prefix_error: return None self.have_prefix_error = True self.total_receive_errors += 1 raise MAVError("invalid MAVLink prefix '%s'" % magic) self.have_prefix_error = False if len(self.buf) >= 2: (magic, self.expected_length) = struct.unpack('BB', self.buf[0:2]) self.expected_length += 8 if self.expected_length >= 8 and len(self.buf) >= self.expected_length: mbuf = self.buf[0:self.expected_length] self.buf = self.buf[self.expected_length:] self.expected_length = 6 if self.robust_parsing: try: m = self.decode(mbuf) self.total_packets_received += 1 except MAVError as reason: m = MAVLink_bad_data(mbuf, reason.message) self.total_receive_errors += 1 else: m = self.decode(mbuf) self.total_packets_received += 1 if self.callback: self.callback(m, *self.callback_args, **self.callback_kwargs) return m return None def parse_buffer(self, s): '''input 
        some data bytes, possibly returning a list of new messages'''
        m = self.parse_char(s)
        if m is None:
            return None
        ret = [m]
        while True:
            m = self.parse_char("")
            if m is None:
                return ret
            ret.append(m)
        return ret

    def decode(self, msgbuf):
        '''decode a buffer as a MAVLink message'''
        # decode the header
        try:
            magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('cBBBBB', msgbuf[:6])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
        if ord(magic) != 254:
            raise MAVError("invalid MAVLink prefix '%s'" % magic)
        if mlen != len(msgbuf)-8:
            raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u' % (len(msgbuf)-8, mlen, msgId))
        if msgId not in mavlink_map:
            raise MAVError('unknown MAVLink message ID %u' % msgId)

        # decode the payload
        (fmt, type, order_map, crc_extra) = mavlink_map[msgId]

        # decode the checksum
        try:
            crc, = struct.unpack('<H', msgbuf[-2:])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
        crc2 = mavutil.x25crc(msgbuf[1:-2])
        if True:  # using CRC extra
            crc2.accumulate(chr(crc_extra))
        if crc != crc2.crc:
            raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))

        try:
            t = struct.unpack(fmt, msgbuf[6:-2])
        except struct.error as emsg:
            raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (type, fmt, len(msgbuf[6:-2]), emsg))

        tlist = list(t)
        # handle sorted fields
        if True:
            t = tlist[:]
            for i in range(0, len(tlist)):
                tlist[i] = t[order_map[i]]
        # terminate any strings
        for i in range(0, len(tlist)):
            if isinstance(tlist[i], str):
                tlist[i] = MAVString(tlist[i])
        t = tuple(tlist)
        # construct the message object
        try:
            m = type(*t)
        except Exception as emsg:
            raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
        m._msgbuf = msgbuf
        m._payload = msgbuf[6:-2]
        m._crc = crc
        m._header = MAVLink_header(msgId, mlen, seq, srcSystem, srcComponent)
        return m

    def heartbeat_encode(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=3):
        '''
        The heartbeat message shows that a system is present and responding.
        The type of the MAV and Autopilot hardware allow the receiving
        system to treat further messages from this system appropriately
        (e.g. by laying out the user interface based on the autopilot).

        type              : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
        autopilot         : Autopilot type / class. defined in MAV_AUTOPILOT ENUM (uint8_t)
        base_mode         : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
        custom_mode       : A bitfield for use for autopilot-specific flags. (uint32_t)
        system_status     : System status flag, see MAV_STATE ENUM (uint8_t)
        mavlink_version   : MAVLink version, not writable by user, gets added by protocol because of magic data type: uint8_t_mavlink_version (uint8_t)
        '''
        msg = MAVLink_heartbeat_message(type, autopilot, base_mode, custom_mode, system_status, mavlink_version)
        msg.pack(self)
        return msg

    def heartbeat_send(self, type, autopilot, base_mode, custom_mode, system_status, mavlink_version=3):
        '''
        The heartbeat message shows that a system is present and responding.
        The type of the MAV and Autopilot hardware allow the receiving
        system to treat further messages from this system appropriately
        (e.g. by laying out the user interface based on the autopilot).

        type              : Type of the MAV (quadrotor, helicopter, etc., up to 15 types, defined in MAV_TYPE ENUM) (uint8_t)
        autopilot         : Autopilot type / class.
                            defined in MAV_AUTOPILOT ENUM (uint8_t)
        base_mode         : System mode bitfield, see MAV_MODE_FLAGS ENUM in mavlink/include/mavlink_types.h (uint8_t)
        custom_mode       : A bitfield for use for autopilot-specific flags. (uint32_t)
        system_status     : System status flag, see MAV_STATE ENUM (uint8_t)
        mavlink_version   : MAVLink version, not writable by user, gets added by protocol because of magic data type: uint8_t_mavlink_version (uint8_t)
        '''
        return self.send(self.heartbeat_encode(type, autopilot, base_mode, custom_mode, system_status, mavlink_version))

    def sys_status_encode(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4):
        '''
        The general system state. If the system is following the MAVLink
        standard, the system state is mainly defined by three orthogonal
        states/modes: The system mode, which is either LOCKED (motors shut
        down and locked), MANUAL (system under RC control), GUIDED (system
        with autonomous position control, position setpoint controlled
        manually) or AUTO (system guided by path/waypoint planner). The
        NAV_MODE defines the current flight state: LIFTOFF (often an
        open-loop maneuver), LANDING, WAYPOINTS or VECTOR. This represents
        the internal navigation state machine. The system status shows
        whether the system is currently active or not and if an emergency
        occurred. During the CRITICAL and EMERGENCY states the MAV is still
        considered to be active, but should start emergency procedures
        autonomously. After a failure occurred it should first move from
        active to critical to allow manual intervention and then move to
        emergency after a certain timeout.

        onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
        onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
        onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica).
                            Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
        load              : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t)
        voltage_battery   : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
        current_battery   : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
        battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimates the remaining battery (int8_t)
        drop_rate_comm    : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
        errors_comm       : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
        errors_count1     : Autopilot-specific errors (uint16_t)
        errors_count2     : Autopilot-specific errors (uint16_t)
        errors_count3     : Autopilot-specific errors (uint16_t)
        errors_count4     : Autopilot-specific errors (uint16_t)
        '''
        msg = MAVLink_sys_status_message(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4)
        msg.pack(self)
        return msg

    def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4):
        '''
        The general system state. If the system is following the MAVLink
        standard, the system state is mainly defined by three orthogonal
        states/modes: The system mode, which is either LOCKED (motors shut
        down and locked), MANUAL (system under RC control), GUIDED (system
        with autonomous position control, position setpoint controlled
        manually) or AUTO (system guided by path/waypoint planner). The
        NAV_MODE defines the current flight state: LIFTOFF (often an
        open-loop maneuver), LANDING, WAYPOINTS or VECTOR. This represents
        the internal navigation state machine. The system status shows
        whether the system is currently active or not and if an emergency
        occurred. During the CRITICAL and EMERGENCY states the MAV is still
        considered to be active, but should start emergency procedures
        autonomously. After a failure occurred it should first move from
        active to critical to allow manual intervention and then move to
        emergency after a certain timeout.

        onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t)
        onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled.
Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t) onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices: 0: 3D gyro, 1: 3D acc, 2: 3D mag, 3: absolute pressure, 4: differential pressure, 5: GPS, 6: optical flow, 7: computer vision position, 8: laser based position, 9: external ground-truth (Vicon or Leica). Controllers: 10: 3D angular rate control 11: attitude stabilization, 12: yaw position, 13: z/altitude control, 14: x/y position control, 15: motor outputs / control (uint32_t) load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t) voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t) current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t) battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t) drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t) errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t) errors_count1 : Autopilot-specific errors (uint16_t) errors_count2 : Autopilot-specific errors (uint16_t) errors_count3 : Autopilot-specific errors (uint16_t) errors_count4 : Autopilot-specific errors (uint16_t) ''' return self.send(self.sys_status_encode(onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4)) def system_time_encode(self, time_unix_usec, time_boot_ms): ''' The system time is the time of the master clock, typically the computer clock of the main onboard computer. time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t) time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t) ''' msg = MAVLink_system_time_message(time_unix_usec, time_boot_ms) msg.pack(self) return msg def system_time_send(self, time_unix_usec, time_boot_ms): ''' The system time is the time of the master clock, typically the computer clock of the main onboard computer. time_unix_usec : Timestamp of the master clock in microseconds since UNIX epoch. (uint64_t) time_boot_ms : Timestamp of the component clock since boot time in milliseconds. (uint32_t) ''' return self.send(self.system_time_encode(time_unix_usec, time_boot_ms)) def ping_encode(self, time_usec, seq, target_system, target_component): ''' A ping message either requesting or responding to a ping. This allows to measure the system latencies, including serial port, radio modem and UDP connections. 
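        A round-trip latency sketch (illustrative only; now_usec() stands in
        for a microsecond-resolution Unix clock):

            mav.ping_send(now_usec(), seq=1, target_system=0, target_component=0)
            # when the echoed PING comes back as msg:
            # latency_us = now_usec() - msg.time_usec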
time_usec : Unix timestamp in microseconds (uint64_t) seq : PING sequence (uint32_t) target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) '''
msg = MAVLink_ping_message(time_usec, seq, target_system, target_component)
msg.pack(self)
return msg
def ping_send(self, time_usec, seq, target_system, target_component): ''' A ping message either requesting or responding to a ping. This allows measuring the system latencies, including serial port, radio modem and UDP connections. time_usec : Unix timestamp in microseconds (uint64_t) seq : PING sequence (uint32_t) target_system : 0: request ping from all receiving systems, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) target_component : 0: request ping from all receiving components, if greater than 0: message is a ping response and number is the system id of the requesting system (uint8_t) '''
return self.send(self.ping_encode(time_usec, seq, target_system, target_component))
def change_operator_control_encode(self, target_system, control_request, version, passkey): ''' Request to control this MAV target_system : System the GCS requests control for (uint8_t) control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t) version : 0: key as plaintext, 1-255: future, different hashing/encryption variants. The GCS should in general use the safest mode possible initially and then gradually move down the encryption level if it gets a NACK message indicating an encryption mismatch. (uint8_t) passkey : Password / Key, depending on version plaintext or encrypted. 25 or fewer characters, NULL terminated. The characters may involve A-Z, a-z, 0-9, and "!?,.-" (char) '''
msg = MAVLink_change_operator_control_message(target_system, control_request, version, passkey)
msg.pack(self)
return msg
def change_operator_control_send(self, target_system, control_request, version, passkey): ''' Request to control this MAV target_system : System the GCS requests control for (uint8_t) control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t) version : 0: key as plaintext, 1-255: future, different hashing/encryption variants. The GCS should in general use the safest mode possible initially and then gradually move down the encryption level if it gets a NACK message indicating an encryption mismatch. (uint8_t) passkey : Password / Key, depending on version plaintext or encrypted. 25 or fewer characters, NULL terminated.
The characters may involve A-Z, a-z, 0-9, and "!?,.-" (char) ''' return self.send(self.change_operator_control_encode(target_system, control_request, version, passkey)) def change_operator_control_ack_encode(self, gcs_system_id, control_request, ack): ''' Accept / deny control of this MAV gcs_system_id : ID of the GCS this message (uint8_t) control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t) ack : 0: ACK, 1: NACK: Wrong passkey, 2: NACK: Unsupported passkey encryption method, 3: NACK: Already under control (uint8_t) ''' msg = MAVLink_change_operator_control_ack_message(gcs_system_id, control_request, ack) msg.pack(self) return msg def change_operator_control_ack_send(self, gcs_system_id, control_request, ack): ''' Accept / deny control of this MAV gcs_system_id : ID of the GCS this message (uint8_t) control_request : 0: request control of this MAV, 1: Release control of this MAV (uint8_t) ack : 0: ACK, 1: NACK: Wrong passkey, 2: NACK: Unsupported passkey encryption method, 3: NACK: Already under control (uint8_t) ''' return self.send(self.change_operator_control_ack_encode(gcs_system_id, control_request, ack)) def auth_key_encode(self, key): ''' Emit an encrypted signature / key identifying this system. PLEASE NOTE: This protocol has been kept simple, so transmitting the key requires an encrypted channel for true safety. key : key (char) ''' msg = MAVLink_auth_key_message(key) msg.pack(self) return msg def auth_key_send(self, key): ''' Emit an encrypted signature / key identifying this system. PLEASE NOTE: This protocol has been kept simple, so transmitting the key requires an encrypted channel for true safety. key : key (char) ''' return self.send(self.auth_key_encode(key)) def set_mode_encode(self, target_system, base_mode, custom_mode): ''' Set the system mode, as defined by enum MAV_MODE. There is no target component id as the mode is by definition for the overall aircraft, not only for one component. target_system : The system setting the mode (uint8_t) base_mode : The new base mode (uint8_t) custom_mode : The new autopilot-specific mode. This field can be ignored by an autopilot. (uint32_t) ''' msg = MAVLink_set_mode_message(target_system, base_mode, custom_mode) msg.pack(self) return msg def set_mode_send(self, target_system, base_mode, custom_mode): ''' Set the system mode, as defined by enum MAV_MODE. There is no target component id as the mode is by definition for the overall aircraft, not only for one component. target_system : The system setting the mode (uint8_t) base_mode : The new base mode (uint8_t) custom_mode : The new autopilot-specific mode. This field can be ignored by an autopilot. (uint32_t) ''' return self.send(self.set_mode_encode(target_system, base_mode, custom_mode)) def param_request_read_encode(self, target_system, target_component, param_id, param_index): ''' Request to read the onboard parameter with the param_id string id. Onboard parameters are stored as key[const char*] -> value[float]. This allows to send a parameter to any other component (such as the GCS) without the need of previous knowledge of possible parameter names. Thus the same GCS can store different parameters for different autopilots. See also http://qgroundcontrol.org/parameter_interface for a full documentation of QGroundControl and IMU code. 
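Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance and 'SOME_PARAM' a hypothetical parameter id):

    # Ask system 1 / component 1 for one parameter by name;
    # param_index=-1 selects lookup by param_id instead of by index.
    mav.param_request_read_send(1, 1, b'SOME_PARAM', -1)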
target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. Send -1 to use the param ID field as identifier (else the param id will be ignored) (int16_t) '''
msg = MAVLink_param_request_read_message(target_system, target_component, param_id, param_index)
msg.pack(self)
return msg
def param_request_read_send(self, target_system, target_component, param_id, param_index): ''' Request to read the onboard parameter with the param_id string id. Onboard parameters are stored as key[const char*] -> value[float]. This allows to send a parameter to any other component (such as the GCS) without the need of previous knowledge of possible parameter names. Thus the same GCS can store different parameters for different autopilots. See also http://qgroundcontrol.org/parameter_interface for a full documentation of QGroundControl and IMU code. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. Send -1 to use the param ID field as identifier (else the param id will be ignored) (int16_t) '''
return self.send(self.param_request_read_encode(target_system, target_component, param_id, param_index))
def param_request_list_encode(self, target_system, target_component): ''' Request all parameters of this component. After this request, all parameters are emitted. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) '''
msg = MAVLink_param_request_list_message(target_system, target_component)
msg.pack(self)
return msg
def param_request_list_send(self, target_system, target_component): ''' Request all parameters of this component. After this request, all parameters are emitted. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) '''
return self.send(self.param_request_list_encode(target_system, target_component))
def param_value_encode(self, param_id, param_value, param_type, param_count, param_index): ''' Emit the value of an onboard parameter. The inclusion of param_count and param_index in the message allows the recipient to keep track of received parameters and allows it to re-request missing parameters after a loss or timeout. param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_value : Onboard parameter value (float) param_type : Onboard parameter type: see the MAV_PARAM_TYPE enum for supported data types. (uint8_t) param_count : Total number of onboard parameters (uint16_t) param_index : Index of this onboard parameter (uint16_t) '''
msg = MAVLink_param_value_message(param_id, param_value, param_type, param_count, param_index)
msg.pack(self)
return msg
def param_value_send(self, param_id, param_value, param_type, param_count, param_index): ''' Emit the value of an onboard parameter.
The inclusion of param_count and param_index in the message allows the recipient to keep track of received parameters and allows it to re-request missing parameters after a loss or timeout. param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_value : Onboard parameter value (float) param_type : Onboard parameter type: see the MAV_PARAM_TYPE enum for supported data types. (uint8_t) param_count : Total number of onboard parameters (uint16_t) param_index : Index of this onboard parameter (uint16_t) '''
return self.send(self.param_value_encode(param_id, param_value, param_type, param_count, param_index))
def param_set_encode(self, target_system, target_component, param_id, param_value, param_type): ''' Set a parameter value TEMPORARILY to RAM. It will be reset to default on system reboot. Send the ACTION MAV_ACTION_STORAGE_WRITE to PERMANENTLY write the RAM contents to EEPROM. IMPORTANT: The receiving component should acknowledge the new parameter value by sending a param_value message to all communication partners. This will also ensure that multiple GCS all have an up-to-date list of all parameters. If the sending GCS does not receive a PARAM_VALUE message within its timeout time, it should re-send the PARAM_SET message. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_value : Onboard parameter value (float) param_type : Onboard parameter type: see the MAV_PARAM_TYPE enum for supported data types. (uint8_t) '''
msg = MAVLink_param_set_message(target_system, target_component, param_id, param_value, param_type)
msg.pack(self)
return msg
def param_set_send(self, target_system, target_component, param_id, param_value, param_type): ''' Set a parameter value TEMPORARILY to RAM. It will be reset to default on system reboot. Send the ACTION MAV_ACTION_STORAGE_WRITE to PERMANENTLY write the RAM contents to EEPROM. IMPORTANT: The receiving component should acknowledge the new parameter value by sending a param_value message to all communication partners. This will also ensure that multiple GCS all have an up-to-date list of all parameters. If the sending GCS does not receive a PARAM_VALUE message within its timeout time, it should re-send the PARAM_SET message. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_value : Onboard parameter value (float) param_type : Onboard parameter type: see the MAV_PARAM_TYPE enum for supported data types. (uint8_t) '''
return self.send(self.param_set_encode(target_system, target_component, param_id, param_value, param_type))
def gps_raw_int_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible): ''' The global position, as returned by the Global Positioning System (GPS).
This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t) lat : Latitude in 1E7 degrees (int32_t) lon : Longitude in 1E7 degrees (int32_t) alt : Altitude in 1E3 meters (millimeters) above MSL (int32_t) eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t) epv : GPS VDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) ''' msg = MAVLink_gps_raw_int_message(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible) msg.pack(self) return msg def gps_raw_int_send(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible): ''' The global position, as returned by the Global Positioning System (GPS). This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : 0-1: no fix, 2: 2D fix, 3: 3D fix. Some applications will not use the value of this field unless it is at least two, so always correctly fill in the fix. (uint8_t) lat : Latitude in 1E7 degrees (int32_t) lon : Longitude in 1E7 degrees (int32_t) alt : Altitude in 1E3 meters (millimeters) above MSL (int32_t) eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t) epv : GPS VDOP horizontal dilution of position in cm (m*100). If unknown, set to: 65535 (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: 65535 (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) ''' return self.send(self.gps_raw_int_encode(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible)) def gps_status_encode(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr): ''' The positioning status, as reported by GPS. This message is intended to display status information about each satellite visible to the receiver. See message GLOBAL_POSITION for the global position estimate. This message can contain information for up to 20 satellites. satellites_visible : Number of satellites visible (uint8_t) satellite_prn : Global satellite ID (uint8_t) satellite_used : 0: Satellite not used, 1: used for localization (uint8_t) satellite_elevation : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t) satellite_azimuth : Direction of satellite, 0: 0 deg, 255: 360 deg. 
(uint8_t) satellite_snr : Signal to noise ratio of satellite (uint8_t) ''' msg = MAVLink_gps_status_message(satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr) msg.pack(self) return msg def gps_status_send(self, satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr): ''' The positioning status, as reported by GPS. This message is intended to display status information about each satellite visible to the receiver. See message GLOBAL_POSITION for the global position estimate. This message can contain information for up to 20 satellites. satellites_visible : Number of satellites visible (uint8_t) satellite_prn : Global satellite ID (uint8_t) satellite_used : 0: Satellite not used, 1: used for localization (uint8_t) satellite_elevation : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t) satellite_azimuth : Direction of satellite, 0: 0 deg, 255: 360 deg. (uint8_t) satellite_snr : Signal to noise ratio of satellite (uint8_t) ''' return self.send(self.gps_status_encode(satellites_visible, satellite_prn, satellite_used, satellite_elevation, satellite_azimuth, satellite_snr)) def scaled_imu_encode(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): ''' The RAW IMU readings for the usual 9DOF sensor setup. This message should contain the scaled values to the described units time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) xacc : X acceleration (mg) (int16_t) yacc : Y acceleration (mg) (int16_t) zacc : Z acceleration (mg) (int16_t) xgyro : Angular speed around X axis (millirad /sec) (int16_t) ygyro : Angular speed around Y axis (millirad /sec) (int16_t) zgyro : Angular speed around Z axis (millirad /sec) (int16_t) xmag : X Magnetic field (milli tesla) (int16_t) ymag : Y Magnetic field (milli tesla) (int16_t) zmag : Z Magnetic field (milli tesla) (int16_t) ''' msg = MAVLink_scaled_imu_message(time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag) msg.pack(self) return msg def scaled_imu_send(self, time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): ''' The RAW IMU readings for the usual 9DOF sensor setup. This message should contain the scaled values to the described units time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) xacc : X acceleration (mg) (int16_t) yacc : Y acceleration (mg) (int16_t) zacc : Z acceleration (mg) (int16_t) xgyro : Angular speed around X axis (millirad /sec) (int16_t) ygyro : Angular speed around Y axis (millirad /sec) (int16_t) zgyro : Angular speed around Z axis (millirad /sec) (int16_t) xmag : X Magnetic field (milli tesla) (int16_t) ymag : Y Magnetic field (milli tesla) (int16_t) zmag : Z Magnetic field (milli tesla) (int16_t) ''' return self.send(self.scaled_imu_encode(time_boot_ms, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)) def raw_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): ''' The RAW IMU readings for the usual 9DOF sensor setup. This message should always contain the true raw values without any scaling to allow data capture and system debugging. 
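Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance and the readings are placeholder ADC counts):

    import time
    mav.raw_imu_send(int(time.time() * 1e6),
                     10, -3, 1002,    # raw accelerometer x, y, z
                     1, 0, -2,        # raw gyro x, y, z
                     120, -40, 310)   # raw magnetometer x, y, z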
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) xacc : X acceleration (raw) (int16_t) yacc : Y acceleration (raw) (int16_t) zacc : Z acceleration (raw) (int16_t) xgyro : Angular speed around X axis (raw) (int16_t) ygyro : Angular speed around Y axis (raw) (int16_t) zgyro : Angular speed around Z axis (raw) (int16_t) xmag : X Magnetic field (raw) (int16_t) ymag : Y Magnetic field (raw) (int16_t) zmag : Z Magnetic field (raw) (int16_t) ''' msg = MAVLink_raw_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag) msg.pack(self) return msg def raw_imu_send(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag): ''' The RAW IMU readings for the usual 9DOF sensor setup. This message should always contain the true raw values without any scaling to allow data capture and system debugging. time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) xacc : X acceleration (raw) (int16_t) yacc : Y acceleration (raw) (int16_t) zacc : Z acceleration (raw) (int16_t) xgyro : Angular speed around X axis (raw) (int16_t) ygyro : Angular speed around Y axis (raw) (int16_t) zgyro : Angular speed around Z axis (raw) (int16_t) xmag : X Magnetic field (raw) (int16_t) ymag : Y Magnetic field (raw) (int16_t) zmag : Z Magnetic field (raw) (int16_t) ''' return self.send(self.raw_imu_encode(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag)) def raw_pressure_encode(self, time_usec, press_abs, press_diff1, press_diff2, temperature): ''' The RAW pressure readings for the typical setup of one absolute pressure and one differential pressure sensor. The sensor values should be the raw, UNSCALED ADC values. time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) press_abs : Absolute pressure (raw) (int16_t) press_diff1 : Differential pressure 1 (raw) (int16_t) press_diff2 : Differential pressure 2 (raw) (int16_t) temperature : Raw Temperature measurement (raw) (int16_t) ''' msg = MAVLink_raw_pressure_message(time_usec, press_abs, press_diff1, press_diff2, temperature) msg.pack(self) return msg def raw_pressure_send(self, time_usec, press_abs, press_diff1, press_diff2, temperature): ''' The RAW pressure readings for the typical setup of one absolute pressure and one differential pressure sensor. The sensor values should be the raw, UNSCALED ADC values. time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) press_abs : Absolute pressure (raw) (int16_t) press_diff1 : Differential pressure 1 (raw) (int16_t) press_diff2 : Differential pressure 2 (raw) (int16_t) temperature : Raw Temperature measurement (raw) (int16_t) ''' return self.send(self.raw_pressure_encode(time_usec, press_abs, press_diff1, press_diff2, temperature)) def scaled_pressure_encode(self, time_boot_ms, press_abs, press_diff, temperature): ''' The pressure readings for the typical setup of one absolute and differential pressure sensor. The units are as specified in each field. 
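Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance and the values are placeholders):

    # 1013.25 hPa absolute, 0.5 hPa differential, 21.50 deg C
    # (temperature is encoded in hundredths of a degree).
    mav.scaled_pressure_send(123456, 1013.25, 0.5, 2150)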
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) press_abs : Absolute pressure (hectopascal) (float) press_diff : Differential pressure 1 (hectopascal) (float) temperature : Temperature measurement (0.01 degrees celsius) (int16_t) ''' msg = MAVLink_scaled_pressure_message(time_boot_ms, press_abs, press_diff, temperature) msg.pack(self) return msg def scaled_pressure_send(self, time_boot_ms, press_abs, press_diff, temperature): ''' The pressure readings for the typical setup of one absolute and differential pressure sensor. The units are as specified in each field. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) press_abs : Absolute pressure (hectopascal) (float) press_diff : Differential pressure 1 (hectopascal) (float) temperature : Temperature measurement (0.01 degrees celsius) (int16_t) ''' return self.send(self.scaled_pressure_encode(time_boot_ms, press_abs, press_diff, temperature)) def attitude_encode(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right). time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) roll : Roll angle (rad, -pi..+pi) (float) pitch : Pitch angle (rad, -pi..+pi) (float) yaw : Yaw angle (rad, -pi..+pi) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) ''' msg = MAVLink_attitude_message(time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed) msg.pack(self) return msg def attitude_send(self, time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right). time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) roll : Roll angle (rad, -pi..+pi) (float) pitch : Pitch angle (rad, -pi..+pi) (float) yaw : Yaw angle (rad, -pi..+pi) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) ''' return self.send(self.attitude_encode(time_boot_ms, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed)) def attitude_quaternion_encode(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right), expressed as quaternion. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) q1 : Quaternion component 1 (float) q2 : Quaternion component 2 (float) q3 : Quaternion component 3 (float) q4 : Quaternion component 4 (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) ''' msg = MAVLink_attitude_quaternion_message(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed) msg.pack(self) return msg def attitude_quaternion_send(self, time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed): ''' The attitude in the aeronautical frame (right-handed, Z-down, X-front, Y-right), expressed as quaternion. 
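Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance; the unit quaternion (1, 0, 0, 0) in the q1..q4 fields encodes the identity, i.e. level, attitude with zero body rates):

    mav.attitude_quaternion_send(123456, 1.0, 0.0, 0.0, 0.0,
                                 0.0, 0.0, 0.0)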
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) q1 : Quaternion component 1 (float) q2 : Quaternion component 2 (float) q3 : Quaternion component 3 (float) q4 : Quaternion component 4 (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) ''' return self.send(self.attitude_quaternion_encode(time_boot_ms, q1, q2, q3, q4, rollspeed, pitchspeed, yawspeed)) def local_position_ned_encode(self, time_boot_ms, x, y, z, vx, vy, vz): ''' The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) x : X Position (float) y : Y Position (float) z : Z Position (float) vx : X Speed (float) vy : Y Speed (float) vz : Z Speed (float) ''' msg = MAVLink_local_position_ned_message(time_boot_ms, x, y, z, vx, vy, vz) msg.pack(self) return msg def local_position_ned_send(self, time_boot_ms, x, y, z, vx, vy, vz): ''' The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) x : X Position (float) y : Y Position (float) z : Z Position (float) vx : X Speed (float) vy : Y Speed (float) vz : Z Speed (float) ''' return self.send(self.local_position_ned_encode(time_boot_ms, x, y, z, vx, vy, vz)) def global_position_int_encode(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg): ''' The filtered global position (e.g. fused GPS and accelerometers). The position is in GPS-frame (right-handed, Z-up). It is designed as scaled integer message since the resolution of float is not sufficient. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) lat : Latitude, expressed as * 1E7 (int32_t) lon : Longitude, expressed as * 1E7 (int32_t) alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t) relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t) vx : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t) vy : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t) vz : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t) hdg : Compass heading in degrees * 100, 0.0..359.99 degrees. If unknown, set to: 65535 (uint16_t) ''' msg = MAVLink_global_position_int_message(time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg) msg.pack(self) return msg def global_position_int_send(self, time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg): ''' The filtered global position (e.g. fused GPS and accelerometers). The position is in GPS-frame (right-handed, Z-up). It is designed as scaled integer message since the resolution of float is not sufficient. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) lat : Latitude, expressed as * 1E7 (int32_t) lon : Longitude, expressed as * 1E7 (int32_t) alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t) relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t) vx : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t) vy : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t) vz : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t) hdg : Compass heading in degrees * 100, 0.0..359.99 degrees. 
If unknown, set to: 65535 (uint16_t) '''
return self.send(self.global_position_int_encode(time_boot_ms, lat, lon, alt, relative_alt, vx, vy, vz, hdg))
def rc_channels_scaled_encode(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi): ''' The scaled values of the RC channels received. (-100%) -10000, (0%) 0, (100%) 10000. Channels that are inactive should be set to the invalid value 32767. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t) '''
msg = MAVLink_rc_channels_scaled_message(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)
msg.pack(self)
return msg
def rc_channels_scaled_send(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi): ''' The scaled values of the RC channels received. (-100%) -10000, (0%) 0, (100%) 10000. Channels that are inactive should be set to the invalid value 32767. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) 32767. (int16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown.
(uint8_t) ''' return self.send(self.rc_channels_scaled_encode(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)) def rc_channels_raw_encode(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi): ''' The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_raw : RC channel 1 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan2_raw : RC channel 2 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan3_raw : RC channel 3 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan4_raw : RC channel 4 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan5_raw : RC channel 5 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan6_raw : RC channel 6 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan7_raw : RC channel 7 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan8_raw : RC channel 8 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t) ''' msg = MAVLink_rc_channels_raw_message(time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi) msg.pack(self) return msg def rc_channels_raw_send(self, time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi): ''' The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_raw : RC channel 1 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan2_raw : RC channel 2 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan3_raw : RC channel 3 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan4_raw : RC channel 4 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan5_raw : RC channel 5 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan6_raw : RC channel 6 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan7_raw : RC channel 7 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) chan8_raw : RC channel 8 value, in microseconds. A value of 65535 implies the channel is unused. (uint16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. 
(uint8_t) ''' return self.send(self.rc_channels_raw_encode(time_boot_ms, port, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, rssi)) def servo_output_raw_encode(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw): ''' The RAW values of the servo outputs (for RC input from the remote, use the RC_CHANNELS messages). The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. time_boot_ms : Timestamp (microseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows to encode more than 8 servos. (uint8_t) servo1_raw : Servo output 1 value, in microseconds (uint16_t) servo2_raw : Servo output 2 value, in microseconds (uint16_t) servo3_raw : Servo output 3 value, in microseconds (uint16_t) servo4_raw : Servo output 4 value, in microseconds (uint16_t) servo5_raw : Servo output 5 value, in microseconds (uint16_t) servo6_raw : Servo output 6 value, in microseconds (uint16_t) servo7_raw : Servo output 7 value, in microseconds (uint16_t) servo8_raw : Servo output 8 value, in microseconds (uint16_t) ''' msg = MAVLink_servo_output_raw_message(time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw) msg.pack(self) return msg def servo_output_raw_send(self, time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw): ''' The RAW values of the servo outputs (for RC input from the remote, use the RC_CHANNELS messages). The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. time_boot_ms : Timestamp (microseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows to encode more than 8 servos. (uint8_t) servo1_raw : Servo output 1 value, in microseconds (uint16_t) servo2_raw : Servo output 2 value, in microseconds (uint16_t) servo3_raw : Servo output 3 value, in microseconds (uint16_t) servo4_raw : Servo output 4 value, in microseconds (uint16_t) servo5_raw : Servo output 5 value, in microseconds (uint16_t) servo6_raw : Servo output 6 value, in microseconds (uint16_t) servo7_raw : Servo output 7 value, in microseconds (uint16_t) servo8_raw : Servo output 8 value, in microseconds (uint16_t) ''' return self.send(self.servo_output_raw_encode(time_boot_ms, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw)) def mission_request_partial_list_encode(self, target_system, target_component, start_index, end_index): ''' Request a partial list of mission items from the system/component. http://qgroundcontrol.org/mavlink/waypoint_protocol. If start and end index are the same, just send one waypoint. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start_index : Start index, 0 by default (int16_t) end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t) ''' msg = MAVLink_mission_request_partial_list_message(target_system, target_component, start_index, end_index) msg.pack(self) return msg def mission_request_partial_list_send(self, target_system, target_component, start_index, end_index): ''' Request a partial list of mission items from the system/component. http://qgroundcontrol.org/mavlink/waypoint_protocol. 
If start and end index are the same, just send one waypoint. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start_index : Start index, 0 by default (int16_t) end_index : End index, -1 by default (-1: send list to end). Else a valid index of the list (int16_t) ''' return self.send(self.mission_request_partial_list_encode(target_system, target_component, start_index, end_index)) def mission_write_partial_list_encode(self, target_system, target_component, start_index, end_index): ''' This message is sent to the MAV to write a partial list. If start index == end index, only one item will be transmitted / updated. If the start index is NOT 0 and above the current list size, this request should be REJECTED! target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t) end_index : End index, equal or greater than start index. (int16_t) ''' msg = MAVLink_mission_write_partial_list_message(target_system, target_component, start_index, end_index) msg.pack(self) return msg def mission_write_partial_list_send(self, target_system, target_component, start_index, end_index): ''' This message is sent to the MAV to write a partial list. If start index == end index, only one item will be transmitted / updated. If the start index is NOT 0 and above the current list size, this request should be REJECTED! target_system : System ID (uint8_t) target_component : Component ID (uint8_t) start_index : Start index, 0 by default and smaller / equal to the largest index of the current onboard list. (int16_t) end_index : End index, equal or greater than start index. (int16_t) ''' return self.send(self.mission_write_partial_list_encode(target_system, target_component, start_index, end_index)) def mission_item_encode(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z): ''' Message encoding a mission item. This message is emitted to announce the presence of a mission item and to set a mission item on the system. The mission item can be either in x, y, z meters (type: LOCAL) or x:lat, y:lon, z:altitude. Local frame is Z-down, right handed (NED), global frame is Z-up, right handed (ENU). See also http://qgroundcontrol.org/mavlink/waypoint_protocol. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) seq : Sequence (uint16_t) frame : The coordinate system of the MISSION. see MAV_FRAME in mavlink_types.h (uint8_t) command : The scheduled action for the MISSION. see MAV_CMD in common.xml MAVLink specs (uint16_t) current : false:0, true:1 (uint8_t) autocontinue : autocontinue to next wp (uint8_t) param1 : PARAM1 / For NAV command MISSIONs: Radius in which the MISSION is accepted as reached, in meters (float) param2 : PARAM2 / For NAV command MISSIONs: Time that the MAV should stay inside the PARAM1 radius before advancing, in milliseconds (float) param3 : PARAM3 / For LOITER command MISSIONs: Orbit to circle around the MISSION, in meters. If positive the orbit direction should be clockwise, if negative the orbit direction should be counter-clockwise. 
(float) param4 : PARAM4 / For NAV and LOITER command MISSIONs: Yaw orientation in degrees, [0..360] 0 = NORTH (float) x : PARAM5 / local: x position, global: latitude (float) y : PARAM6 / y position: global: longitude (float) z : PARAM7 / z position: global: altitude (float) ''' msg = MAVLink_mission_item_message(target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z) msg.pack(self) return msg def mission_item_send(self, target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z): ''' Message encoding a mission item. This message is emitted to announce the presence of a mission item and to set a mission item on the system. The mission item can be either in x, y, z meters (type: LOCAL) or x:lat, y:lon, z:altitude. Local frame is Z-down, right handed (NED), global frame is Z-up, right handed (ENU). See also http://qgroundcontrol.org/mavlink/waypoint_protocol. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) seq : Sequence (uint16_t) frame : The coordinate system of the MISSION. see MAV_FRAME in mavlink_types.h (uint8_t) command : The scheduled action for the MISSION. see MAV_CMD in common.xml MAVLink specs (uint16_t) current : false:0, true:1 (uint8_t) autocontinue : autocontinue to next wp (uint8_t) param1 : PARAM1 / For NAV command MISSIONs: Radius in which the MISSION is accepted as reached, in meters (float) param2 : PARAM2 / For NAV command MISSIONs: Time that the MAV should stay inside the PARAM1 radius before advancing, in milliseconds (float) param3 : PARAM3 / For LOITER command MISSIONs: Orbit to circle around the MISSION, in meters. If positive the orbit direction should be clockwise, if negative the orbit direction should be counter-clockwise. (float) param4 : PARAM4 / For NAV and LOITER command MISSIONs: Yaw orientation in degrees, [0..360] 0 = NORTH (float) x : PARAM5 / local: x position, global: latitude (float) y : PARAM6 / y position: global: longitude (float) z : PARAM7 / z position: global: altitude (float) ''' return self.send(self.mission_item_encode(target_system, target_component, seq, frame, command, current, autocontinue, param1, param2, param3, param4, x, y, z)) def mission_request_encode(self, target_system, target_component, seq): ''' Request the information of the mission item with the sequence number seq. The response of the system to this message should be a MISSION_ITEM message. http://qgroundcontrol.org/mavlink/waypoint_protocol target_system : System ID (uint8_t) target_component : Component ID (uint8_t) seq : Sequence (uint16_t) ''' msg = MAVLink_mission_request_message(target_system, target_component, seq) msg.pack(self) return msg def mission_request_send(self, target_system, target_component, seq): ''' Request the information of the mission item with the sequence number seq. The response of the system to this message should be a MISSION_ITEM message. http://qgroundcontrol.org/mavlink/waypoint_protocol target_system : System ID (uint8_t) target_component : Component ID (uint8_t) seq : Sequence (uint16_t) ''' return self.send(self.mission_request_encode(target_system, target_component, seq)) def mission_set_current_encode(self, target_system, target_component, seq): ''' Set the mission item with sequence number seq as current item. This means that the MAV will continue to this mission item on the shortest path (not following the mission items in-between). 
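Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance):

    # Make mission item 3 the active item on system 1 / component 1.
    mav.mission_set_current_send(1, 1, 3)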
target_system : System ID (uint8_t) target_component : Component ID (uint8_t) seq : Sequence (uint16_t) ''' msg = MAVLink_mission_set_current_message(target_system, target_component, seq) msg.pack(self) return msg def mission_set_current_send(self, target_system, target_component, seq): ''' Set the mission item with sequence number seq as current item. This means that the MAV will continue to this mission item on the shortest path (not following the mission items in-between). target_system : System ID (uint8_t) target_component : Component ID (uint8_t) seq : Sequence (uint16_t) ''' return self.send(self.mission_set_current_encode(target_system, target_component, seq)) def mission_current_encode(self, seq): ''' Message that announces the sequence number of the current active mission item. The MAV will fly towards this mission item. seq : Sequence (uint16_t) ''' msg = MAVLink_mission_current_message(seq) msg.pack(self) return msg def mission_current_send(self, seq): ''' Message that announces the sequence number of the current active mission item. The MAV will fly towards this mission item. seq : Sequence (uint16_t) ''' return self.send(self.mission_current_encode(seq)) def mission_request_list_encode(self, target_system, target_component): ''' Request the overall list of mission items from the system/component. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) ''' msg = MAVLink_mission_request_list_message(target_system, target_component) msg.pack(self) return msg def mission_request_list_send(self, target_system, target_component): ''' Request the overall list of mission items from the system/component. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) ''' return self.send(self.mission_request_list_encode(target_system, target_component)) def mission_count_encode(self, target_system, target_component, count): ''' This message is emitted as response to MISSION_REQUEST_LIST by the MAV and to initiate a write transaction. The GCS can then request the individual mission item based on the knowledge of the total number of MISSIONs. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) count : Number of mission items in the sequence (uint16_t) ''' msg = MAVLink_mission_count_message(target_system, target_component, count) msg.pack(self) return msg def mission_count_send(self, target_system, target_component, count): ''' This message is emitted as response to MISSION_REQUEST_LIST by the MAV and to initiate a write transaction. The GCS can then request the individual mission item based on the knowledge of the total number of MISSIONs. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) count : Number of mission items in the sequence (uint16_t) ''' return self.send(self.mission_count_encode(target_system, target_component, count)) def mission_clear_all_encode(self, target_system, target_component): ''' Delete all mission items at once. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) ''' msg = MAVLink_mission_clear_all_message(target_system, target_component) msg.pack(self) return msg def mission_clear_all_send(self, target_system, target_component): ''' Delete all mission items at once. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) ''' return self.send(self.mission_clear_all_encode(target_system, target_component)) def mission_item_reached_encode(self, seq): ''' A certain mission item has been reached. 
The system will either hold this position (or circle on the orbit) or (if the autocontinue on the WP was set) continue to the next MISSION. seq : Sequence (uint16_t) ''' msg = MAVLink_mission_item_reached_message(seq) msg.pack(self) return msg def mission_item_reached_send(self, seq): ''' A certain mission item has been reached. The system will either hold this position (or circle on the orbit) or (if the autocontinue on the WP was set) continue to the next MISSION. seq : Sequence (uint16_t) ''' return self.send(self.mission_item_reached_encode(seq)) def mission_ack_encode(self, target_system, target_component, type): ''' Ack message during MISSION handling. The type field states if this message is a positive ack (type=0) or if an error happened (type=non-zero). target_system : System ID (uint8_t) target_component : Component ID (uint8_t) type : See MAV_MISSION_RESULT enum (uint8_t) ''' msg = MAVLink_mission_ack_message(target_system, target_component, type) msg.pack(self) return msg def mission_ack_send(self, target_system, target_component, type): ''' Ack message during MISSION handling. The type field states if this message is a positive ack (type=0) or if an error happened (type=non-zero). target_system : System ID (uint8_t) target_component : Component ID (uint8_t) type : See MAV_MISSION_RESULT enum (uint8_t) ''' return self.send(self.mission_ack_encode(target_system, target_component, type)) def set_gps_global_origin_encode(self, target_system, latitude, longitude, altitude): ''' As local waypoints exist, the global MISSION reference allows to transform between the local coordinate frame and the global (GPS) coordinate frame. This can be necessary when e.g. in- and outdoor settings are connected and the MAV should move from in- to outdoor. target_system : System ID (uint8_t) latitude : global position * 1E7 (int32_t) longitude : global position * 1E7 (int32_t) altitude : global position * 1000 (int32_t) ''' msg = MAVLink_set_gps_global_origin_message(target_system, latitude, longitude, altitude) msg.pack(self) return msg def set_gps_global_origin_send(self, target_system, latitude, longitude, altitude): ''' As local waypoints exist, the global MISSION reference allows to transform between the local coordinate frame and the global (GPS) coordinate frame. This can be necessary when e.g. in- and outdoor settings are connected and the MAV should move from in- to outdoor. 
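Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance and the coordinates are placeholders):

    # 47.3977419 deg N, 8.5455938 deg E, 488 m, scaled to the
    # integer units this message expects (deg*1E7, deg*1E7, mm).
    mav.set_gps_global_origin_send(1, 473977419, 85455938, 488000)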
target_system : System ID (uint8_t) latitude : global position * 1E7 (int32_t) longitude : global position * 1E7 (int32_t) altitude : global position * 1000 (int32_t) ''' return self.send(self.set_gps_global_origin_encode(target_system, latitude, longitude, altitude)) def gps_global_origin_encode(self, latitude, longitude, altitude): ''' Once the MAV sets a new GPS-Local correspondence, this message announces the origin (0,0,0) position latitude : Latitude (WGS84), expressed as * 1E7 (int32_t) longitude : Longitude (WGS84), expressed as * 1E7 (int32_t) altitude : Altitude(WGS84), expressed as * 1000 (int32_t) ''' msg = MAVLink_gps_global_origin_message(latitude, longitude, altitude) msg.pack(self) return msg def gps_global_origin_send(self, latitude, longitude, altitude): ''' Once the MAV sets a new GPS-Local correspondence, this message announces the origin (0,0,0) position latitude : Latitude (WGS84), expressed as * 1E7 (int32_t) longitude : Longitude (WGS84), expressed as * 1E7 (int32_t) altitude : Altitude(WGS84), expressed as * 1000 (int32_t) ''' return self.send(self.gps_global_origin_encode(latitude, longitude, altitude)) def set_local_position_setpoint_encode(self, target_system, target_component, coordinate_frame, x, y, z, yaw): ''' Set the setpoint for a local position controller. This is the position in local coordinates the MAV should fly to. This message is sent by the path/MISSION planner to the onboard position controller. As some MAVs have a degree of freedom in yaw (e.g. all helicopters/quadrotors), the desired yaw angle is part of the message. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t) x : x position (float) y : y position (float) z : z position (float) yaw : Desired yaw angle (float) ''' msg = MAVLink_set_local_position_setpoint_message(target_system, target_component, coordinate_frame, x, y, z, yaw) msg.pack(self) return msg def set_local_position_setpoint_send(self, target_system, target_component, coordinate_frame, x, y, z, yaw): ''' Set the setpoint for a local position controller. This is the position in local coordinates the MAV should fly to. This message is sent by the path/MISSION planner to the onboard position controller. As some MAVs have a degree of freedom in yaw (e.g. all helicopters/quadrotors), the desired yaw angle is part of the message. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t) x : x position (float) y : y position (float) z : z position (float) yaw : Desired yaw angle (float) ''' return self.send(self.set_local_position_setpoint_encode(target_system, target_component, coordinate_frame, x, y, z, yaw)) def local_position_setpoint_encode(self, coordinate_frame, x, y, z, yaw): ''' Transmit the current local setpoint of the controller to other MAVs (collision avoidance) and to the GCS. 
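Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance and MAV_FRAME_LOCAL_NED an enum value assumed to be defined elsewhere in this module):

    # Announce a setpoint 5 m north, 2 m east, 10 m above the origin
    # (z is positive-down in NED), with yaw of about 90 deg in radians.
    mav.local_position_setpoint_send(MAV_FRAME_LOCAL_NED,
                                     5.0, 2.0, -10.0, 1.571)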
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t) x : x position (float) y : y position (float) z : z position (float) yaw : Desired yaw angle (float) ''' msg = MAVLink_local_position_setpoint_message(coordinate_frame, x, y, z, yaw) msg.pack(self) return msg def local_position_setpoint_send(self, coordinate_frame, x, y, z, yaw): ''' Transmit the current local setpoint of the controller to other MAVs (collision avoidance) and to the GCS. coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_LOCAL_NED or MAV_FRAME_LOCAL_ENU (uint8_t) x : x position (float) y : y position (float) z : z position (float) yaw : Desired yaw angle (float) ''' return self.send(self.local_position_setpoint_encode(coordinate_frame, x, y, z, yaw)) def global_position_setpoint_int_encode(self, coordinate_frame, latitude, longitude, altitude, yaw): ''' Transmit the current local setpoint of the controller to other MAVs (collision avoidance) and to the GCS. coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t) latitude : WGS84 Latitude position in degrees * 1E7 (int32_t) longitude : WGS84 Longitude position in degrees * 1E7 (int32_t) altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t) yaw : Desired yaw angle in degrees * 100 (int16_t) ''' msg = MAVLink_global_position_setpoint_int_message(coordinate_frame, latitude, longitude, altitude, yaw) msg.pack(self) return msg def global_position_setpoint_int_send(self, coordinate_frame, latitude, longitude, altitude, yaw): ''' Transmit the current local setpoint of the controller to other MAVs (collision avoidance) and to the GCS. coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t) latitude : WGS84 Latitude position in degrees * 1E7 (int32_t) longitude : WGS84 Longitude position in degrees * 1E7 (int32_t) altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t) yaw : Desired yaw angle in degrees * 100 (int16_t) ''' return self.send(self.global_position_setpoint_int_encode(coordinate_frame, latitude, longitude, altitude, yaw)) def set_global_position_setpoint_int_encode(self, coordinate_frame, latitude, longitude, altitude, yaw): ''' Set the current global position setpoint. coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t) latitude : WGS84 Latitude position in degrees * 1E7 (int32_t) longitude : WGS84 Longitude position in degrees * 1E7 (int32_t) altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t) yaw : Desired yaw angle in degrees * 100 (int16_t) ''' msg = MAVLink_set_global_position_setpoint_int_message(coordinate_frame, latitude, longitude, altitude, yaw) msg.pack(self) return msg def set_global_position_setpoint_int_send(self, coordinate_frame, latitude, longitude, altitude, yaw): ''' Set the current global position setpoint. 
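Example (editor's illustrative sketch; `mav` is an assumed MAVLink instance and MAV_FRAME_GLOBAL_RELATIVE_ALT an enum value assumed to be defined elsewhere in this module; the coordinates are placeholders):

    # 47.3977419 N, 8.5455938 E, 50 m relative altitude, yaw 90 deg,
    # in this message's scaled integer units.
    mav.set_global_position_setpoint_int_send(
        MAV_FRAME_GLOBAL_RELATIVE_ALT,
        473977419, 85455938, 50000, 9000)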
coordinate_frame : Coordinate frame - valid values are only MAV_FRAME_GLOBAL or MAV_FRAME_GLOBAL_RELATIVE_ALT (uint8_t) latitude : WGS84 Latitude position in degrees * 1E7 (int32_t) longitude : WGS84 Longitude position in degrees * 1E7 (int32_t) altitude : WGS84 Altitude in meters * 1000 (positive for up) (int32_t) yaw : Desired yaw angle in degrees * 100 (int16_t) ''' return self.send(self.set_global_position_setpoint_int_encode(coordinate_frame, latitude, longitude, altitude, yaw)) def safety_set_allowed_area_encode(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z): ''' Set a safety zone (volume), which is defined by two corners of a cube. This message can be used to tell the MAV which setpoints/MISSIONs to accept and which to reject. Safety areas are often enforced by national or competition regulations. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t) p1x : x position 1 / Latitude 1 (float) p1y : y position 1 / Longitude 1 (float) p1z : z position 1 / Altitude 1 (float) p2x : x position 2 / Latitude 2 (float) p2y : y position 2 / Longitude 2 (float) p2z : z position 2 / Altitude 2 (float) ''' msg = MAVLink_safety_set_allowed_area_message(target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z) msg.pack(self) return msg def safety_set_allowed_area_send(self, target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z): ''' Set a safety zone (volume), which is defined by two corners of a cube. This message can be used to tell the MAV which setpoints/MISSIONs to accept and which to reject. Safety areas are often enforced by national or competition regulations. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t) p1x : x position 1 / Latitude 1 (float) p1y : y position 1 / Longitude 1 (float) p1z : z position 1 / Altitude 1 (float) p2x : x position 2 / Latitude 2 (float) p2y : y position 2 / Longitude 2 (float) p2z : z position 2 / Altitude 2 (float) ''' return self.send(self.safety_set_allowed_area_encode(target_system, target_component, frame, p1x, p1y, p1z, p2x, p2y, p2z)) def safety_allowed_area_encode(self, frame, p1x, p1y, p1z, p2x, p2y, p2z): ''' Read out the safety zone the MAV currently assumes. frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. (uint8_t) p1x : x position 1 / Latitude 1 (float) p1y : y position 1 / Longitude 1 (float) p1z : z position 1 / Altitude 1 (float) p2x : x position 2 / Latitude 2 (float) p2y : y position 2 / Longitude 2 (float) p2z : z position 2 / Altitude 2 (float) ''' msg = MAVLink_safety_allowed_area_message(frame, p1x, p1y, p1z, p2x, p2y, p2z) msg.pack(self) return msg def safety_allowed_area_send(self, frame, p1x, p1y, p1z, p2x, p2y, p2z): ''' Read out the safety zone the MAV currently assumes. frame : Coordinate frame, as defined by MAV_FRAME enum in mavlink_types.h. Can be either global, GPS, right-handed with Z axis up or local, right handed, Z axis down. 
(uint8_t) p1x : x position 1 / Latitude 1 (float) p1y : y position 1 / Longitude 1 (float) p1z : z position 1 / Altitude 1 (float) p2x : x position 2 / Latitude 2 (float) p2y : y position 2 / Longitude 2 (float) p2z : z position 2 / Altitude 2 (float) ''' return self.send(self.safety_allowed_area_encode(frame, p1x, p1y, p1z, p2x, p2y, p2z)) def set_roll_pitch_yaw_thrust_encode(self, target_system, target_component, roll, pitch, yaw, thrust): ''' Set roll, pitch and yaw. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) roll : Desired roll angle in radians (float) pitch : Desired pitch angle in radians (float) yaw : Desired yaw angle in radians (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' msg = MAVLink_set_roll_pitch_yaw_thrust_message(target_system, target_component, roll, pitch, yaw, thrust) msg.pack(self) return msg def set_roll_pitch_yaw_thrust_send(self, target_system, target_component, roll, pitch, yaw, thrust): ''' Set roll, pitch and yaw. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) roll : Desired roll angle in radians (float) pitch : Desired pitch angle in radians (float) yaw : Desired yaw angle in radians (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' return self.send(self.set_roll_pitch_yaw_thrust_encode(target_system, target_component, roll, pitch, yaw, thrust)) def set_roll_pitch_yaw_speed_thrust_encode(self, target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust): ''' Set roll, pitch and yaw. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) roll_speed : Desired roll angular speed in rad/s (float) pitch_speed : Desired pitch angular speed in rad/s (float) yaw_speed : Desired yaw angular speed in rad/s (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' msg = MAVLink_set_roll_pitch_yaw_speed_thrust_message(target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust) msg.pack(self) return msg def set_roll_pitch_yaw_speed_thrust_send(self, target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust): ''' Set roll, pitch and yaw. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) roll_speed : Desired roll angular speed in rad/s (float) pitch_speed : Desired pitch angular speed in rad/s (float) yaw_speed : Desired yaw angular speed in rad/s (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' return self.send(self.set_roll_pitch_yaw_speed_thrust_encode(target_system, target_component, roll_speed, pitch_speed, yaw_speed, thrust)) def roll_pitch_yaw_thrust_setpoint_encode(self, time_boot_ms, roll, pitch, yaw, thrust): ''' Setpoint in roll, pitch, yaw currently active on the system. time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) roll : Desired roll angle in radians (float) pitch : Desired pitch angle in radians (float) yaw : Desired yaw angle in radians (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' msg = MAVLink_roll_pitch_yaw_thrust_setpoint_message(time_boot_ms, roll, pitch, yaw, thrust) msg.pack(self) return msg def roll_pitch_yaw_thrust_setpoint_send(self, time_boot_ms, roll, pitch, yaw, thrust): ''' Setpoint in roll, pitch, yaw currently active on the system. 
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) roll : Desired roll angle in radians (float) pitch : Desired pitch angle in radians (float) yaw : Desired yaw angle in radians (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' return self.send(self.roll_pitch_yaw_thrust_setpoint_encode(time_boot_ms, roll, pitch, yaw, thrust)) def roll_pitch_yaw_speed_thrust_setpoint_encode(self, time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust): ''' Setpoint in rollspeed, pitchspeed, yawspeed currently active on the system. time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) roll_speed : Desired roll angular speed in rad/s (float) pitch_speed : Desired pitch angular speed in rad/s (float) yaw_speed : Desired yaw angular speed in rad/s (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' msg = MAVLink_roll_pitch_yaw_speed_thrust_setpoint_message(time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust) msg.pack(self) return msg def roll_pitch_yaw_speed_thrust_setpoint_send(self, time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust): ''' Setpoint in rollspeed, pitchspeed, yawspeed currently active on the system. time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) roll_speed : Desired roll angular speed in rad/s (float) pitch_speed : Desired pitch angular speed in rad/s (float) yaw_speed : Desired yaw angular speed in rad/s (float) thrust : Collective thrust, normalized to 0 .. 1 (float) ''' return self.send(self.roll_pitch_yaw_speed_thrust_setpoint_encode(time_boot_ms, roll_speed, pitch_speed, yaw_speed, thrust)) def set_quad_motors_setpoint_encode(self, target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw): ''' Setpoint in the four motor speeds target_system : System ID of the system that should set these motor commands (uint8_t) motor_front_nw : Front motor in + configuration, front left motor in x configuration (uint16_t) motor_right_ne : Right motor in + configuration, front right motor in x configuration (uint16_t) motor_back_se : Back motor in + configuration, back right motor in x configuration (uint16_t) motor_left_sw : Left motor in + configuration, back left motor in x configuration (uint16_t) ''' msg = MAVLink_set_quad_motors_setpoint_message(target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw) msg.pack(self) return msg def set_quad_motors_setpoint_send(self, target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw): ''' Setpoint in the four motor speeds target_system : System ID of the system that should set these motor commands (uint8_t) motor_front_nw : Front motor in + configuration, front left motor in x configuration (uint16_t) motor_right_ne : Right motor in + configuration, front right motor in x configuration (uint16_t) motor_back_se : Back motor in + configuration, back right motor in x configuration (uint16_t) motor_left_sw : Left motor in + configuration, back left motor in x configuration (uint16_t) ''' return self.send(self.set_quad_motors_setpoint_encode(target_system, motor_front_nw, motor_right_ne, motor_back_se, motor_left_sw)) def set_quad_swarm_roll_pitch_yaw_thrust_encode(self, group, mode, roll, pitch, yaw, thrust): ''' Setpoint for up to four quadrotors in a group / wing group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t) mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t) roll : Desired roll angle in radians +-PI (+-32767) (int16_t) pitch : Desired pitch 
angle in radians +-PI (+-32767) (int16_t) yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t) thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t) ''' msg = MAVLink_set_quad_swarm_roll_pitch_yaw_thrust_message(group, mode, roll, pitch, yaw, thrust) msg.pack(self) return msg def set_quad_swarm_roll_pitch_yaw_thrust_send(self, group, mode, roll, pitch, yaw, thrust): ''' Setpoint for up to four quadrotors in a group / wing group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t) mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t) roll : Desired roll angle in radians +-PI (+-32767) (int16_t) pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t) yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t) thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t) ''' return self.send(self.set_quad_swarm_roll_pitch_yaw_thrust_encode(group, mode, roll, pitch, yaw, thrust)) def nav_controller_output_encode(self, nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error): ''' Outputs of the APM navigation controller. The primary use of this message is to check the response and signs of the controller before actual flight and to assist with tuning controller parameters. nav_roll : Current desired roll in degrees (float) nav_pitch : Current desired pitch in degrees (float) nav_bearing : Current desired heading in degrees (int16_t) target_bearing : Bearing to current MISSION/target in degrees (int16_t) wp_dist : Distance to active MISSION in meters (uint16_t) alt_error : Current altitude error in meters (float) aspd_error : Current airspeed error in meters/second (float) xtrack_error : Current crosstrack error on x-y plane in meters (float) ''' msg = MAVLink_nav_controller_output_message(nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error) msg.pack(self) return msg def nav_controller_output_send(self, nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error): ''' Outputs of the APM navigation controller. The primary use of this message is to check the response and signs of the controller before actual flight and to assist with tuning controller parameters. 
        nav_roll : Current desired roll in degrees (float)
        nav_pitch : Current desired pitch in degrees (float)
        nav_bearing : Current desired heading in degrees (int16_t)
        target_bearing : Bearing to current MISSION/target in degrees (int16_t)
        wp_dist : Distance to active MISSION in meters (uint16_t)
        alt_error : Current altitude error in meters (float)
        aspd_error : Current airspeed error in meters/second (float)
        xtrack_error : Current crosstrack error on x-y plane in meters (float)

        '''
        return self.send(self.nav_controller_output_encode(nav_roll, nav_pitch, nav_bearing, target_bearing, wp_dist, alt_error, aspd_error, xtrack_error))

    def set_quad_swarm_led_roll_pitch_yaw_thrust_encode(self, group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust):
        '''
        Setpoint for up to four quadrotors in a group / wing

        group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t)
        mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t)
        led_red : RGB red channel (0-255) (uint8_t)
        led_blue : RGB blue channel (0-255) (uint8_t)
        led_green : RGB green channel (0-255) (uint8_t)
        roll : Desired roll angle in radians +-PI (+-32767) (int16_t)
        pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t)
        yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t)
        thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t)

        '''
        msg = MAVLink_set_quad_swarm_led_roll_pitch_yaw_thrust_message(group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust)
        msg.pack(self)
        return msg

    def set_quad_swarm_led_roll_pitch_yaw_thrust_send(self, group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust):
        '''
        Setpoint for up to four quadrotors in a group / wing

        group : ID of the quadrotor group (0 - 255, up to 256 groups supported) (uint8_t)
        mode : ID of the flight mode (0 - 255, up to 256 modes supported) (uint8_t)
        led_red : RGB red channel (0-255) (uint8_t)
        led_blue : RGB blue channel (0-255) (uint8_t)
        led_green : RGB green channel (0-255) (uint8_t)
        roll : Desired roll angle in radians +-PI (+-32767) (int16_t)
        pitch : Desired pitch angle in radians +-PI (+-32767) (int16_t)
        yaw : Desired yaw angle in radians, scaled to int16 +-PI (+-32767) (int16_t)
        thrust : Collective thrust, scaled to uint16 (0..65535) (uint16_t)

        '''
        return self.send(self.set_quad_swarm_led_roll_pitch_yaw_thrust_encode(group, mode, led_red, led_blue, led_green, roll, pitch, yaw, thrust))

    def state_correction_encode(self, xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr):
        '''
        Corrects the system's state by adding an error correction term to the
        position and velocity, and by rotating the attitude by a correction
        angle.

        xErr : x position error (float)
        yErr : y position error (float)
        zErr : z position error (float)
        rollErr : roll error (radians) (float)
        pitchErr : pitch error (radians) (float)
        yawErr : yaw error (radians) (float)
        vxErr : x velocity (float)
        vyErr : y velocity (float)
        vzErr : z velocity (float)

        '''
        msg = MAVLink_state_correction_message(xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr)
        msg.pack(self)
        return msg

    def state_correction_send(self, xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr):
        '''
        Corrects the system's state by adding an error correction term to the
        position and velocity, and by rotating the attitude by a correction
        angle.
        xErr : x position error (float)
        yErr : y position error (float)
        zErr : z position error (float)
        rollErr : roll error (radians) (float)
        pitchErr : pitch error (radians) (float)
        yawErr : yaw error (radians) (float)
        vxErr : x velocity (float)
        vyErr : y velocity (float)
        vzErr : z velocity (float)

        '''
        return self.send(self.state_correction_encode(xErr, yErr, zErr, rollErr, pitchErr, yawErr, vxErr, vyErr, vzErr))

    def request_data_stream_encode(self, target_system, target_component, req_stream_id, req_message_rate, start_stop):
        '''
        target_system : The target requested to send the message stream. (uint8_t)
        target_component : The target requested to send the message stream. (uint8_t)
        req_stream_id : The ID of the requested data stream (uint8_t)
        req_message_rate : The requested interval between two messages of this type (uint16_t)
        start_stop : 1 to start sending, 0 to stop sending. (uint8_t)

        '''
        msg = MAVLink_request_data_stream_message(target_system, target_component, req_stream_id, req_message_rate, start_stop)
        msg.pack(self)
        return msg

    def request_data_stream_send(self, target_system, target_component, req_stream_id, req_message_rate, start_stop):
        '''
        target_system : The target requested to send the message stream. (uint8_t)
        target_component : The target requested to send the message stream. (uint8_t)
        req_stream_id : The ID of the requested data stream (uint8_t)
        req_message_rate : The requested interval between two messages of this type (uint16_t)
        start_stop : 1 to start sending, 0 to stop sending. (uint8_t)

        '''
        return self.send(self.request_data_stream_encode(target_system, target_component, req_stream_id, req_message_rate, start_stop))

    def data_stream_encode(self, stream_id, message_rate, on_off):
        '''
        stream_id : The ID of the requested data stream (uint8_t)
        message_rate : The requested interval between two messages of this type (uint16_t)
        on_off : 1 stream is enabled, 0 stream is stopped. (uint8_t)

        '''
        msg = MAVLink_data_stream_message(stream_id, message_rate, on_off)
        msg.pack(self)
        return msg

    def data_stream_send(self, stream_id, message_rate, on_off):
        '''
        stream_id : The ID of the requested data stream (uint8_t)
        message_rate : The requested interval between two messages of this type (uint16_t)
        on_off : 1 stream is enabled, 0 stream is stopped. (uint8_t)

        '''
        return self.send(self.data_stream_encode(stream_id, message_rate, on_off))

    def manual_control_encode(self, target, x, y, z, r, buttons):
        '''
        This message provides an API for manually controlling the vehicle
        using standard joystick axes nomenclature, along with a joystick-like
        input device. Unused axes can be disabled and buttons are also
        transmitted as boolean values of their current state.

        target : The system to be controlled. (uint8_t)
        x : X-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to forward(1000)-backward(-1000) movement on a joystick and the pitch of a vehicle. (int16_t)
        y : Y-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to left(-1000)-right(1000) movement on a joystick and the roll of a vehicle. (int16_t)
        z : Z-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a separate slider movement with maximum being 1000 and minimum being -1000 on a joystick and the thrust of a vehicle. (int16_t)
        r : R-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a twisting of the joystick, with counter-clockwise being 1000 and clockwise being -1000, and the yaw of a vehicle. (int16_t)
        buttons : A bitfield corresponding to the joystick buttons' current state, 1 for pressed, 0 for released. The lowest bit corresponds to Button 1. (uint16_t)

        '''
        msg = MAVLink_manual_control_message(target, x, y, z, r, buttons)
        msg.pack(self)
        return msg

    def manual_control_send(self, target, x, y, z, r, buttons):
        '''
        This message provides an API for manually controlling the vehicle
        using standard joystick axes nomenclature, along with a joystick-like
        input device. Unused axes can be disabled and buttons are also
        transmitted as boolean values of their current state.

        target : The system to be controlled. (uint8_t)
        x : X-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to forward(1000)-backward(-1000) movement on a joystick and the pitch of a vehicle. (int16_t)
        y : Y-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to left(-1000)-right(1000) movement on a joystick and the roll of a vehicle. (int16_t)
        z : Z-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a separate slider movement with maximum being 1000 and minimum being -1000 on a joystick and the thrust of a vehicle. (int16_t)
        r : R-axis, normalized to the range [-1000,1000]. A value of INT16_MAX indicates that this axis is invalid. Generally corresponds to a twisting of the joystick, with counter-clockwise being 1000 and clockwise being -1000, and the yaw of a vehicle. (int16_t)
        buttons : A bitfield corresponding to the joystick buttons' current state, 1 for pressed, 0 for released. The lowest bit corresponds to Button 1. (uint16_t)

        '''
        return self.send(self.manual_control_encode(target, x, y, z, r, buttons))

    def rc_channels_override_encode(self, target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw):
        '''
        The RAW values of the RC channels sent to the MAV to override info
        received from the RC radio. A value of -1 means no change to that
        channel. A value of 0 means control of that channel should be
        released back to the RC radio. The standard PPM modulation is as
        follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual
        receivers/transmitters might violate this specification.

        target_system : System ID (uint8_t)
        target_component : Component ID (uint8_t)
        chan1_raw : RC channel 1 value, in microseconds (uint16_t)
        chan2_raw : RC channel 2 value, in microseconds (uint16_t)
        chan3_raw : RC channel 3 value, in microseconds (uint16_t)
        chan4_raw : RC channel 4 value, in microseconds (uint16_t)
        chan5_raw : RC channel 5 value, in microseconds (uint16_t)
        chan6_raw : RC channel 6 value, in microseconds (uint16_t)
        chan7_raw : RC channel 7 value, in microseconds (uint16_t)
        chan8_raw : RC channel 8 value, in microseconds (uint16_t)

        '''
        msg = MAVLink_rc_channels_override_message(target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw)
        msg.pack(self)
        return msg

    def rc_channels_override_send(self, target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw):
        '''
        The RAW values of the RC channels sent to the MAV to override info
        received from the RC radio. A value of -1 means no change to that
        channel. A value of 0 means control of that channel should be
        released back to the RC radio. The standard PPM modulation is as
        follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual
        receivers/transmitters might violate this specification.

        target_system : System ID (uint8_t)
        target_component : Component ID (uint8_t)
        chan1_raw : RC channel 1 value, in microseconds (uint16_t)
        chan2_raw : RC channel 2 value, in microseconds (uint16_t)
        chan3_raw : RC channel 3 value, in microseconds (uint16_t)
        chan4_raw : RC channel 4 value, in microseconds (uint16_t)
        chan5_raw : RC channel 5 value, in microseconds (uint16_t)
        chan6_raw : RC channel 6 value, in microseconds (uint16_t)
        chan7_raw : RC channel 7 value, in microseconds (uint16_t)
        chan8_raw : RC channel 8 value, in microseconds (uint16_t)

        '''
        return self.send(self.rc_channels_override_encode(target_system, target_component, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw))

    def vfr_hud_encode(self, airspeed, groundspeed, heading, throttle, alt, climb):
        '''
        Metrics typically displayed on a HUD for fixed wing aircraft

        airspeed : Current airspeed in m/s (float)
        groundspeed : Current ground speed in m/s (float)
        heading : Current heading in degrees, in compass units (0..360, 0=north) (int16_t)
        throttle : Current throttle setting in integer percent, 0 to 100 (uint16_t)
        alt : Current altitude (MSL), in meters (float)
        climb : Current climb rate in meters/second (float)

        '''
        msg = MAVLink_vfr_hud_message(airspeed, groundspeed, heading, throttle, alt, climb)
        msg.pack(self)
        return msg

    def vfr_hud_send(self, airspeed, groundspeed, heading, throttle, alt, climb):
        '''
        Metrics typically displayed on a HUD for fixed wing aircraft

        airspeed : Current airspeed in m/s (float)
        groundspeed : Current ground speed in m/s (float)
        heading : Current heading in degrees, in compass units (0..360, 0=north) (int16_t)
        throttle : Current throttle setting in integer percent, 0 to 100 (uint16_t)
        alt : Current altitude (MSL), in meters (float)
        climb : Current climb rate in meters/second (float)

        '''
        return self.send(self.vfr_hud_encode(airspeed, groundspeed, heading, throttle, alt, climb))

    def command_long_encode(self, target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7):
        '''
        Send a command with up to seven parameters to the MAV

        target_system : System which should execute the command (uint8_t)
        target_component : Component which should execute the command, 0 for all components (uint8_t)
        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        confirmation : 0: First transmission of this command. 1-255: Confirmation transmissions (e.g. for kill command) (uint8_t)
        param1 : Parameter 1, as defined by MAV_CMD enum. (float)
        param2 : Parameter 2, as defined by MAV_CMD enum. (float)
        param3 : Parameter 3, as defined by MAV_CMD enum. (float)
        param4 : Parameter 4, as defined by MAV_CMD enum. (float)
        param5 : Parameter 5, as defined by MAV_CMD enum. (float)
        param6 : Parameter 6, as defined by MAV_CMD enum. (float)
        param7 : Parameter 7, as defined by MAV_CMD enum. (float)

        '''
        msg = MAVLink_command_long_message(target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7)
        msg.pack(self)
        return msg

    def command_long_send(self, target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7):
        '''
        Send a command with up to seven parameters to the MAV

        target_system : System which should execute the command (uint8_t)
        target_component : Component which should execute the command, 0 for all components (uint8_t)
        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        confirmation : 0: First transmission of this command. 1-255: Confirmation transmissions (e.g. for kill command) (uint8_t)
        param1 : Parameter 1, as defined by MAV_CMD enum. (float)
        param2 : Parameter 2, as defined by MAV_CMD enum. (float)
        param3 : Parameter 3, as defined by MAV_CMD enum. (float)
        param4 : Parameter 4, as defined by MAV_CMD enum. (float)
        param5 : Parameter 5, as defined by MAV_CMD enum. (float)
        param6 : Parameter 6, as defined by MAV_CMD enum. (float)
        param7 : Parameter 7, as defined by MAV_CMD enum. (float)

        '''
        return self.send(self.command_long_encode(target_system, target_component, command, confirmation, param1, param2, param3, param4, param5, param6, param7))

    def command_ack_encode(self, command, result):
        '''
        Report status of a command. Includes feedback whether the command was
        executed.

        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        result : See MAV_RESULT enum (uint8_t)

        '''
        msg = MAVLink_command_ack_message(command, result)
        msg.pack(self)
        return msg

    def command_ack_send(self, command, result):
        '''
        Report status of a command. Includes feedback whether the command was
        executed.

        command : Command ID, as defined by MAV_CMD enum. (uint16_t)
        result : See MAV_RESULT enum (uint8_t)

        '''
        return self.send(self.command_ack_encode(command, result))

    def roll_pitch_yaw_rates_thrust_setpoint_encode(self, time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust):
        '''
        Setpoint in roll, pitch, yaw rates and thrust currently active on the
        system.

        time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
        roll_rate : Desired roll rate in radians per second (float)
        pitch_rate : Desired pitch rate in radians per second (float)
        yaw_rate : Desired yaw rate in radians per second (float)
        thrust : Collective thrust, normalized to 0 .. 1 (float)

        '''
        msg = MAVLink_roll_pitch_yaw_rates_thrust_setpoint_message(time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust)
        msg.pack(self)
        return msg

    def roll_pitch_yaw_rates_thrust_setpoint_send(self, time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust):
        '''
        Setpoint in roll, pitch, yaw rates and thrust currently active on the
        system.

        time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
        roll_rate : Desired roll rate in radians per second (float)
        pitch_rate : Desired pitch rate in radians per second (float)
        yaw_rate : Desired yaw rate in radians per second (float)
        thrust : Collective thrust, normalized to 0 ..
1 (float) ''' return self.send(self.roll_pitch_yaw_rates_thrust_setpoint_encode(time_boot_ms, roll_rate, pitch_rate, yaw_rate, thrust)) def manual_setpoint_encode(self, time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch): ''' Setpoint in roll, pitch, yaw and thrust from the operator time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) roll : Desired roll rate in radians per second (float) pitch : Desired pitch rate in radians per second (float) yaw : Desired yaw rate in radians per second (float) thrust : Collective thrust, normalized to 0 .. 1 (float) mode_switch : Flight mode switch position, 0.. 255 (uint8_t) manual_override_switch : Override mode switch position, 0.. 255 (uint8_t) ''' msg = MAVLink_manual_setpoint_message(time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch) msg.pack(self) return msg def manual_setpoint_send(self, time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch): ''' Setpoint in roll, pitch, yaw and thrust from the operator time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) roll : Desired roll rate in radians per second (float) pitch : Desired pitch rate in radians per second (float) yaw : Desired yaw rate in radians per second (float) thrust : Collective thrust, normalized to 0 .. 1 (float) mode_switch : Flight mode switch position, 0.. 255 (uint8_t) manual_override_switch : Override mode switch position, 0.. 255 (uint8_t) ''' return self.send(self.manual_setpoint_encode(time_boot_ms, roll, pitch, yaw, thrust, mode_switch, manual_override_switch)) def local_position_ned_system_global_offset_encode(self, time_boot_ms, x, y, z, roll, pitch, yaw): ''' The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages of MAV X and the global coordinate frame in NED coordinates. Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) x : X Position (float) y : Y Position (float) z : Z Position (float) roll : Roll (float) pitch : Pitch (float) yaw : Yaw (float) ''' msg = MAVLink_local_position_ned_system_global_offset_message(time_boot_ms, x, y, z, roll, pitch, yaw) msg.pack(self) return msg def local_position_ned_system_global_offset_send(self, time_boot_ms, x, y, z, roll, pitch, yaw): ''' The offset in X, Y, Z and yaw between the LOCAL_POSITION_NED messages of MAV X and the global coordinate frame in NED coordinates. Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) x : X Position (float) y : Y Position (float) z : Z Position (float) roll : Roll (float) pitch : Pitch (float) yaw : Yaw (float) ''' return self.send(self.local_position_ned_system_global_offset_encode(time_boot_ms, x, y, z, roll, pitch, yaw)) def hil_state_encode(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc): ''' Sent from simulation to autopilot. This packet is useful for high throughput applications such as hardware in the loop simulations. 
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll : Roll angle (rad) (float) pitch : Pitch angle (rad) (float) yaw : Yaw angle (rad) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) lat : Latitude, expressed as * 1E7 (int32_t) lon : Longitude, expressed as * 1E7 (int32_t) alt : Altitude in meters, expressed as * 1000 (millimeters) (int32_t) vx : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t) vy : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t) vz : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t) xacc : X acceleration (mg) (int16_t) yacc : Y acceleration (mg) (int16_t) zacc : Z acceleration (mg) (int16_t) ''' msg = MAVLink_hil_state_message(time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc) msg.pack(self) return msg def hil_state_send(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc): ''' Sent from simulation to autopilot. This packet is useful for high throughput applications such as hardware in the loop simulations. time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll : Roll angle (rad) (float) pitch : Pitch angle (rad) (float) yaw : Yaw angle (rad) (float) rollspeed : Roll angular speed (rad/s) (float) pitchspeed : Pitch angular speed (rad/s) (float) yawspeed : Yaw angular speed (rad/s) (float) lat : Latitude, expressed as * 1E7 (int32_t) lon : Longitude, expressed as * 1E7 (int32_t) alt : Altitude in meters, expressed as * 1000 (millimeters) (int32_t) vx : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t) vy : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t) vz : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t) xacc : X acceleration (mg) (int16_t) yacc : Y acceleration (mg) (int16_t) zacc : Z acceleration (mg) (int16_t) ''' return self.send(self.hil_state_encode(time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc)) def hil_controls_encode(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode): ''' Sent from autopilot to simulation. Hardware in the loop control outputs time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll_ailerons : Control output -1 .. 1 (float) pitch_elevator : Control output -1 .. 1 (float) yaw_rudder : Control output -1 .. 1 (float) throttle : Throttle 0 .. 1 (float) aux1 : Aux 1, -1 .. 1 (float) aux2 : Aux 2, -1 .. 1 (float) aux3 : Aux 3, -1 .. 1 (float) aux4 : Aux 4, -1 .. 1 (float) mode : System mode (MAV_MODE) (uint8_t) nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t) ''' msg = MAVLink_hil_controls_message(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode) msg.pack(self) return msg def hil_controls_send(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode): ''' Sent from autopilot to simulation. Hardware in the loop control outputs time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) roll_ailerons : Control output -1 .. 1 (float) pitch_elevator : Control output -1 .. 1 (float) yaw_rudder : Control output -1 .. 1 (float) throttle : Throttle 0 .. 
1 (float) aux1 : Aux 1, -1 .. 1 (float) aux2 : Aux 2, -1 .. 1 (float) aux3 : Aux 3, -1 .. 1 (float) aux4 : Aux 4, -1 .. 1 (float) mode : System mode (MAV_MODE) (uint8_t) nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t) ''' return self.send(self.hil_controls_encode(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode)) def hil_rc_inputs_raw_encode(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi): ''' Sent from simulation to autopilot. The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) chan1_raw : RC channel 1 value, in microseconds (uint16_t) chan2_raw : RC channel 2 value, in microseconds (uint16_t) chan3_raw : RC channel 3 value, in microseconds (uint16_t) chan4_raw : RC channel 4 value, in microseconds (uint16_t) chan5_raw : RC channel 5 value, in microseconds (uint16_t) chan6_raw : RC channel 6 value, in microseconds (uint16_t) chan7_raw : RC channel 7 value, in microseconds (uint16_t) chan8_raw : RC channel 8 value, in microseconds (uint16_t) chan9_raw : RC channel 9 value, in microseconds (uint16_t) chan10_raw : RC channel 10 value, in microseconds (uint16_t) chan11_raw : RC channel 11 value, in microseconds (uint16_t) chan12_raw : RC channel 12 value, in microseconds (uint16_t) rssi : Receive signal strength indicator, 0: 0%, 255: 100% (uint8_t) ''' msg = MAVLink_hil_rc_inputs_raw_message(time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi) msg.pack(self) return msg def hil_rc_inputs_raw_send(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi): ''' Sent from simulation to autopilot. The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. 
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) chan1_raw : RC channel 1 value, in microseconds (uint16_t) chan2_raw : RC channel 2 value, in microseconds (uint16_t) chan3_raw : RC channel 3 value, in microseconds (uint16_t) chan4_raw : RC channel 4 value, in microseconds (uint16_t) chan5_raw : RC channel 5 value, in microseconds (uint16_t) chan6_raw : RC channel 6 value, in microseconds (uint16_t) chan7_raw : RC channel 7 value, in microseconds (uint16_t) chan8_raw : RC channel 8 value, in microseconds (uint16_t) chan9_raw : RC channel 9 value, in microseconds (uint16_t) chan10_raw : RC channel 10 value, in microseconds (uint16_t) chan11_raw : RC channel 11 value, in microseconds (uint16_t) chan12_raw : RC channel 12 value, in microseconds (uint16_t) rssi : Receive signal strength indicator, 0: 0%, 255: 100% (uint8_t) ''' return self.send(self.hil_rc_inputs_raw_encode(time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi)) def optical_flow_encode(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance): ''' Optical flow from a flow sensor (e.g. optical mouse sensor) time_usec : Timestamp (UNIX) (uint64_t) sensor_id : Sensor ID (uint8_t) flow_x : Flow in pixels in x-sensor direction (int16_t) flow_y : Flow in pixels in y-sensor direction (int16_t) flow_comp_m_x : Flow in meters in x-sensor direction, angular-speed compensated (float) flow_comp_m_y : Flow in meters in y-sensor direction, angular-speed compensated (float) quality : Optical flow quality / confidence. 0: bad, 255: maximum quality (uint8_t) ground_distance : Ground distance in meters. Positive value: distance known. Negative value: Unknown distance (float) ''' msg = MAVLink_optical_flow_message(time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance) msg.pack(self) return msg def optical_flow_send(self, time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance): ''' Optical flow from a flow sensor (e.g. optical mouse sensor) time_usec : Timestamp (UNIX) (uint64_t) sensor_id : Sensor ID (uint8_t) flow_x : Flow in pixels in x-sensor direction (int16_t) flow_y : Flow in pixels in y-sensor direction (int16_t) flow_comp_m_x : Flow in meters in x-sensor direction, angular-speed compensated (float) flow_comp_m_y : Flow in meters in y-sensor direction, angular-speed compensated (float) quality : Optical flow quality / confidence. 0: bad, 255: maximum quality (uint8_t) ground_distance : Ground distance in meters. Positive value: distance known. 
Negative value: Unknown distance (float) ''' return self.send(self.optical_flow_encode(time_usec, sensor_id, flow_x, flow_y, flow_comp_m_x, flow_comp_m_y, quality, ground_distance)) def global_vision_position_estimate_encode(self, usec, x, y, z, roll, pitch, yaw): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X position (float) y : Global Y position (float) z : Global Z position (float) roll : Roll angle in rad (float) pitch : Pitch angle in rad (float) yaw : Yaw angle in rad (float) ''' msg = MAVLink_global_vision_position_estimate_message(usec, x, y, z, roll, pitch, yaw) msg.pack(self) return msg def global_vision_position_estimate_send(self, usec, x, y, z, roll, pitch, yaw): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X position (float) y : Global Y position (float) z : Global Z position (float) roll : Roll angle in rad (float) pitch : Pitch angle in rad (float) yaw : Yaw angle in rad (float) ''' return self.send(self.global_vision_position_estimate_encode(usec, x, y, z, roll, pitch, yaw)) def vision_position_estimate_encode(self, usec, x, y, z, roll, pitch, yaw): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X position (float) y : Global Y position (float) z : Global Z position (float) roll : Roll angle in rad (float) pitch : Pitch angle in rad (float) yaw : Yaw angle in rad (float) ''' msg = MAVLink_vision_position_estimate_message(usec, x, y, z, roll, pitch, yaw) msg.pack(self) return msg def vision_position_estimate_send(self, usec, x, y, z, roll, pitch, yaw): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X position (float) y : Global Y position (float) z : Global Z position (float) roll : Roll angle in rad (float) pitch : Pitch angle in rad (float) yaw : Yaw angle in rad (float) ''' return self.send(self.vision_position_estimate_encode(usec, x, y, z, roll, pitch, yaw)) def vision_speed_estimate_encode(self, usec, x, y, z): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X speed (float) y : Global Y speed (float) z : Global Z speed (float) ''' msg = MAVLink_vision_speed_estimate_message(usec, x, y, z) msg.pack(self) return msg def vision_speed_estimate_send(self, usec, x, y, z): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X speed (float) y : Global Y speed (float) z : Global Z speed (float) ''' return self.send(self.vision_speed_estimate_encode(usec, x, y, z)) def vicon_position_estimate_encode(self, usec, x, y, z, roll, pitch, yaw): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X position (float) y : Global Y position (float) z : Global Z position (float) roll : Roll angle in rad (float) pitch : Pitch angle in rad (float) yaw : Yaw angle in rad (float) ''' msg = MAVLink_vicon_position_estimate_message(usec, x, y, z, roll, pitch, yaw) msg.pack(self) return msg def vicon_position_estimate_send(self, usec, x, y, z, roll, pitch, yaw): ''' usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) x : Global X position (float) y : Global Y position (float) z : Global Z position (float) roll : Roll angle in rad (float) pitch : Pitch angle in rad (float) yaw : Yaw angle in rad (float) ''' return self.send(self.vicon_position_estimate_encode(usec, x, y, z, roll, pitch, yaw)) def 
highres_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated): ''' The IMU readings in SI units in NED body frame time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) xacc : X acceleration (m/s^2) (float) yacc : Y acceleration (m/s^2) (float) zacc : Z acceleration (m/s^2) (float) xgyro : Angular speed around X axis (rad / sec) (float) ygyro : Angular speed around Y axis (rad / sec) (float) zgyro : Angular speed around Z axis (rad / sec) (float) xmag : X Magnetic field (Gauss) (float) ymag : Y Magnetic field (Gauss) (float) zmag : Z Magnetic field (Gauss) (float) abs_pressure : Absolute pressure in millibar (float) diff_pressure : Differential pressure in millibar (float) pressure_alt : Altitude calculated from pressure (float) temperature : Temperature in degrees celsius (float) fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t) ''' msg = MAVLink_highres_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated) msg.pack(self) return msg def highres_imu_send(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated): ''' The IMU readings in SI units in NED body frame time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) xacc : X acceleration (m/s^2) (float) yacc : Y acceleration (m/s^2) (float) zacc : Z acceleration (m/s^2) (float) xgyro : Angular speed around X axis (rad / sec) (float) ygyro : Angular speed around Y axis (rad / sec) (float) zgyro : Angular speed around Z axis (rad / sec) (float) xmag : X Magnetic field (Gauss) (float) ymag : Y Magnetic field (Gauss) (float) zmag : Z Magnetic field (Gauss) (float) abs_pressure : Absolute pressure in millibar (float) diff_pressure : Differential pressure in millibar (float) pressure_alt : Altitude calculated from pressure (float) temperature : Temperature in degrees celsius (float) fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t) ''' return self.send(self.highres_imu_encode(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated)) def file_transfer_start_encode(self, transfer_uid, dest_path, direction, file_size, flags): ''' Begin file transfer transfer_uid : Unique transfer ID (uint64_t) dest_path : Destination path (char) direction : Transfer direction: 0: from requester, 1: to requester (uint8_t) file_size : File size in bytes (uint32_t) flags : RESERVED (uint8_t) ''' msg = MAVLink_file_transfer_start_message(transfer_uid, dest_path, direction, file_size, flags) msg.pack(self) return msg def file_transfer_start_send(self, transfer_uid, dest_path, direction, file_size, flags): ''' Begin file transfer transfer_uid : Unique transfer ID (uint64_t) dest_path : Destination path (char) direction : Transfer direction: 0: from requester, 1: to requester (uint8_t) file_size : File size in bytes (uint32_t) flags : RESERVED (uint8_t) ''' return self.send(self.file_transfer_start_encode(transfer_uid, dest_path, direction, file_size, flags)) def file_transfer_dir_list_encode(self, transfer_uid, dir_path, flags): ''' Get directory listing transfer_uid : Unique transfer ID (uint64_t) 
        dir_path : Directory path to list (char)
        flags : RESERVED (uint8_t)

        '''
        msg = MAVLink_file_transfer_dir_list_message(transfer_uid, dir_path, flags)
        msg.pack(self)
        return msg

    def file_transfer_dir_list_send(self, transfer_uid, dir_path, flags):
        '''
        Get directory listing

        transfer_uid : Unique transfer ID (uint64_t)
        dir_path : Directory path to list (char)
        flags : RESERVED (uint8_t)

        '''
        return self.send(self.file_transfer_dir_list_encode(transfer_uid, dir_path, flags))

    def file_transfer_res_encode(self, transfer_uid, result):
        '''
        File transfer result

        transfer_uid : Unique transfer ID (uint64_t)
        result : 0: OK, 1: not permitted, 2: bad path / file name, 3: no space left on device (uint8_t)

        '''
        msg = MAVLink_file_transfer_res_message(transfer_uid, result)
        msg.pack(self)
        return msg

    def file_transfer_res_send(self, transfer_uid, result):
        '''
        File transfer result

        transfer_uid : Unique transfer ID (uint64_t)
        result : 0: OK, 1: not permitted, 2: bad path / file name, 3: no space left on device (uint8_t)

        '''
        return self.send(self.file_transfer_res_encode(transfer_uid, result))

    def battery_status_encode(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
        '''
        Transmit battery information for an accu pack.

        accu_id : Accupack ID (uint8_t)
        voltage_cell_1 : Battery voltage of cell 1, in millivolts (1 = 1 millivolt) (uint16_t)
        voltage_cell_2 : Battery voltage of cell 2, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_3 : Battery voltage of cell 3, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_4 : Battery voltage of cell 4, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_5 : Battery voltage of cell 5, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_6 : Battery voltage of cell 6, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
        battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot does not estimate the remaining battery (int8_t)

        '''
        msg = MAVLink_battery_status_message(accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining)
        msg.pack(self)
        return msg

    def battery_status_send(self, accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining):
        '''
        Transmit battery information for an accu pack.

        accu_id : Accupack ID (uint8_t)
        voltage_cell_1 : Battery voltage of cell 1, in millivolts (1 = 1 millivolt) (uint16_t)
        voltage_cell_2 : Battery voltage of cell 2, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_3 : Battery voltage of cell 3, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_4 : Battery voltage of cell 4, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_5 : Battery voltage of cell 5, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        voltage_cell_6 : Battery voltage of cell 6, in millivolts (1 = 1 millivolt), -1: no cell (uint16_t)
        current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
        battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot does not estimate the remaining battery (int8_t)

        '''
        return self.send(self.battery_status_encode(accu_id, voltage_cell_1, voltage_cell_2, voltage_cell_3, voltage_cell_4, voltage_cell_5, voltage_cell_6, current_battery, battery_remaining))

    def setpoint_8dof_encode(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
        '''
        Set the 8 DOF setpoint for a controller.

        target_system : System ID (uint8_t)
        val1 : Value 1 (float)
        val2 : Value 2 (float)
        val3 : Value 3 (float)
        val4 : Value 4 (float)
        val5 : Value 5 (float)
        val6 : Value 6 (float)
        val7 : Value 7 (float)
        val8 : Value 8 (float)

        '''
        msg = MAVLink_setpoint_8dof_message(target_system, val1, val2, val3, val4, val5, val6, val7, val8)
        msg.pack(self)
        return msg

    def setpoint_8dof_send(self, target_system, val1, val2, val3, val4, val5, val6, val7, val8):
        '''
        Set the 8 DOF setpoint for a controller.

        target_system : System ID (uint8_t)
        val1 : Value 1 (float)
        val2 : Value 2 (float)
        val3 : Value 3 (float)
        val4 : Value 4 (float)
        val5 : Value 5 (float)
        val6 : Value 6 (float)
        val7 : Value 7 (float)
        val8 : Value 8 (float)

        '''
        return self.send(self.setpoint_8dof_encode(target_system, val1, val2, val3, val4, val5, val6, val7, val8))

    def setpoint_6dof_encode(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
        '''
        Set the 6 DOF setpoint for an attitude and position controller.

        target_system : System ID (uint8_t)
        trans_x : Translational Component in x (float)
        trans_y : Translational Component in y (float)
        trans_z : Translational Component in z (float)
        rot_x : Rotational Component in x (float)
        rot_y : Rotational Component in y (float)
        rot_z : Rotational Component in z (float)

        '''
        msg = MAVLink_setpoint_6dof_message(target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z)
        msg.pack(self)
        return msg

    def setpoint_6dof_send(self, target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z):
        '''
        Set the 6 DOF setpoint for an attitude and position controller.

        target_system : System ID (uint8_t)
        trans_x : Translational Component in x (float)
        trans_y : Translational Component in y (float)
        trans_z : Translational Component in z (float)
        rot_x : Rotational Component in x (float)
        rot_y : Rotational Component in y (float)
        rot_z : Rotational Component in z (float)

        '''
        return self.send(self.setpoint_6dof_encode(target_system, trans_x, trans_y, trans_z, rot_x, rot_y, rot_z))

    def memory_vect_encode(self, address, ver, type, value):
        '''
        Send raw controller memory. The use of this message is discouraged
        for normal packets, but a quite efficient way for testing new
        messages and getting experimental debug output.

        address : Starting address of the debug variables (uint16_t)
        ver : Version code of the type variable.
0=unknown, type ignored and assumed int16_t. 1=as below (uint8_t) type : Type code of the memory variables. for ver = 1: 0=16 x int16_t, 1=16 x uint16_t, 2=16 x Q15, 3=16 x 1Q14 (uint8_t) value : Memory contents at specified address (int8_t) ''' msg = MAVLink_memory_vect_message(address, ver, type, value) msg.pack(self) return msg def memory_vect_send(self, address, ver, type, value): ''' Send raw controller memory. The use of this message is discouraged for normal packets, but a quite efficient way for testing new messages and getting experimental debug output. address : Starting address of the debug variables (uint16_t) ver : Version code of the type variable. 0=unknown, type ignored and assumed int16_t. 1=as below (uint8_t) type : Type code of the memory variables. for ver = 1: 0=16 x int16_t, 1=16 x uint16_t, 2=16 x Q15, 3=16 x 1Q14 (uint8_t) value : Memory contents at specified address (int8_t) ''' return self.send(self.memory_vect_encode(address, ver, type, value)) def debug_vect_encode(self, name, time_usec, x, y, z): ''' name : Name (char) time_usec : Timestamp (uint64_t) x : x (float) y : y (float) z : z (float) ''' msg = MAVLink_debug_vect_message(name, time_usec, x, y, z) msg.pack(self) return msg def debug_vect_send(self, name, time_usec, x, y, z): ''' name : Name (char) time_usec : Timestamp (uint64_t) x : x (float) y : y (float) z : z (float) ''' return self.send(self.debug_vect_encode(name, time_usec, x, y, z)) def named_value_float_encode(self, time_boot_ms, name, value): ''' Send a key-value pair as float. The use of this message is discouraged for normal packets, but a quite efficient way for testing new messages and getting experimental debug output. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) name : Name of the debug variable (char) value : Floating point value (float) ''' msg = MAVLink_named_value_float_message(time_boot_ms, name, value) msg.pack(self) return msg def named_value_float_send(self, time_boot_ms, name, value): ''' Send a key-value pair as float. The use of this message is discouraged for normal packets, but a quite efficient way for testing new messages and getting experimental debug output. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) name : Name of the debug variable (char) value : Floating point value (float) ''' return self.send(self.named_value_float_encode(time_boot_ms, name, value)) def named_value_int_encode(self, time_boot_ms, name, value): ''' Send a key-value pair as integer. The use of this message is discouraged for normal packets, but a quite efficient way for testing new messages and getting experimental debug output. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) name : Name of the debug variable (char) value : Signed integer value (int32_t) ''' msg = MAVLink_named_value_int_message(time_boot_ms, name, value) msg.pack(self) return msg def named_value_int_send(self, time_boot_ms, name, value): ''' Send a key-value pair as integer. The use of this message is discouraged for normal packets, but a quite efficient way for testing new messages and getting experimental debug output. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) name : Name of the debug variable (char) value : Signed integer value (int32_t) ''' return self.send(self.named_value_int_encode(time_boot_ms, name, value)) def statustext_encode(self, severity, text): ''' Status text message. These messages are printed in yellow in the COMM console of QGroundControl. 
WARNING: They consume quite some bandwidth, so use only for important status and error messages. If implemented wisely, these messages are buffered on the MCU and sent only at a limited rate (e.g. 10 Hz). severity : Severity of status. Relies on the definitions within RFC-5424. See enum MAV_SEVERITY. (uint8_t) text : Status text message, without null termination character (char) ''' msg = MAVLink_statustext_message(severity, text) msg.pack(self) return msg def statustext_send(self, severity, text): ''' Status text message. These messages are printed in yellow in the COMM console of QGroundControl. WARNING: They consume quite some bandwidth, so use only for important status and error messages. If implemented wisely, these messages are buffered on the MCU and sent only at a limited rate (e.g. 10 Hz). severity : Severity of status. Relies on the definitions within RFC-5424. See enum MAV_SEVERITY. (uint8_t) text : Status text message, without null termination character (char) ''' return self.send(self.statustext_encode(severity, text)) def debug_encode(self, time_boot_ms, ind, value): ''' Send a debug value. The index is used to discriminate between values. These values show up in the plot of QGroundControl as DEBUG N. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) ind : index of debug variable (uint8_t) value : DEBUG value (float) ''' msg = MAVLink_debug_message(time_boot_ms, ind, value) msg.pack(self) return msg def debug_send(self, time_boot_ms, ind, value): ''' Send a debug value. The index is used to discriminate between values. These values show up in the plot of QGroundControl as DEBUG N. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) ind : index of debug variable (uint8_t) value : DEBUG value (float) ''' return self.send(self.debug_encode(time_boot_ms, ind, value))
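A hedged usage sketch appended for illustration only: the generated *_send helpers above are normally reached through pymavlink's mavutil connection wrapper. This assumes pymavlink is installed and that something is listening on the hypothetical UDP endpoint below.

def _demo_debug_send():
    import time
    from pymavlink import mavutil
    conn = mavutil.mavlink_connection('udpout:127.0.0.1:14550', source_system=1)
    boot_ms = int(time.time() * 1000) & 0xFFFFFFFF
    # key-value float debug output; shows up as a named value in a GCS
    conn.mav.named_value_float_send(boot_ms, b'alt_err', 0.42)
    # human-readable status line; severity values follow MAV_SEVERITY
    conn.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_INFO, b'system ready')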
promgen/proxy.py
kackey0-1/promgen
913
132796
<filename>promgen/proxy.py # Copyright (c) 2019 LINE Corporation # These sources are released under the terms of the MIT license: see LICENSE import concurrent.futures import json import logging from urllib.parse import urljoin import requests from dateutil import parser from django.conf import settings from django.http import HttpResponse, JsonResponse from django.template import defaultfilters from django.views.generic import View from django.views.generic.base import TemplateView from promgen import forms, models, prometheus, util from requests.exceptions import HTTPError logger = logging.getLogger(__name__) def proxy_error(response): """ Return a wrapped proxy error Taking a request.response object as input, return it slightly modified with an extra header for debugging so that we can see where the request failed """ r = HttpResponse( response.content, content_type=response.headers["content-type"], status=response.status_code, ) r.setdefault("X-PROMGEN-PROXY", response.url) return r class PrometheusProxy(View): proxy_headers = {"HTTP_REFERER": "Referer"} @property def headers(self): # Loop through the headers from our request, and decide which ones # we should pass through upstream. Currently, our 'Referer' header is # the main one we are interested in, since this can help us debug which # grafana dashboard is responsible for the query. return { self.proxy_headers[k]: self.request.META[k] for k in self.proxy_headers if k in self.request.META } def proxy(self, request): futures = [] with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor: for host in models.Shard.objects.filter(proxy=True): futures.append( executor.submit( util.get, urljoin(host.url, request.get_full_path_info()), headers=self.headers, ) ) for future in concurrent.futures.as_completed(futures): yield future class ProxyGraph(TemplateView): template_name = "promgen/graph.html" def get_context_data(self, **kwargs): context = super(ProxyGraph, self).get_context_data(**kwargs) context["shard_list"] = models.Shard.objects.filter(proxy=True) for k, v in self.request.GET.items(): _, k = k.split(".") context[k] = v return context class ProxyLabels(PrometheusProxy): def get(self, request): data = set() for future in self.proxy(request): try: result = future.result() result.raise_for_status() _json = result.json() logger.debug("Appending data from %s", result.url) data.update(_json["data"]) except HTTPError: logger.warning("Error with response") return proxy_error(result) return JsonResponse({"status": "success", "data": sorted(data)}) class ProxyLabelValues(PrometheusProxy): def get(self, request, label): data = set() for future in self.proxy(request): try: result = future.result() result.raise_for_status() _json = result.json() logger.debug("Appending data from %s", result.url) data.update(_json["data"]) except HTTPError: logger.warning("Error with response") return proxy_error(result) return JsonResponse({"status": "success", "data": sorted(data)}) class ProxySeries(PrometheusProxy): def get(self, request): data = [] for future in self.proxy(request): try: result = future.result() result.raise_for_status() _json = result.json() logger.debug("Appending data from %s", result.url) data += _json["data"] except HTTPError: logger.warning("Error with response") return proxy_error(result) return JsonResponse({"status": "success", "data": data}) class ProxyQueryRange(PrometheusProxy): def get(self, request): data = [] resultType = None for future in self.proxy(request): try: result = future.result() 
result.raise_for_status() _json = result.json() logger.debug("Appending data from %s", result.url) data += _json["data"]["result"] resultType = _json["data"]["resultType"] except HTTPError: return proxy_error(result) return JsonResponse( {"status": "success", "data": {"resultType": resultType, "result": data}} ) class ProxyQuery(PrometheusProxy): def get(self, request): data = [] resultType = None for future in self.proxy(request): try: result = future.result() result.raise_for_status() _json = result.json() logger.debug("Appending data from %s", result.url) data += _json["data"]["result"] resultType = _json["data"]["resultType"] except HTTPError: return proxy_error(result) return JsonResponse( {"status": "success", "data": {"resultType": resultType, "result": data}} ) class ProxyAlerts(View): def get(self, request): try: url = urljoin(util.setting("alertmanager:url"), "/api/v1/alerts") response = util.get(url) except requests.exceptions.ConnectionError: logger.error("Error connecting to %s", url) return JsonResponse({}) else: return HttpResponse(response.content, content_type="application/json") class ProxySilences(View): def get(self, request): try: url = urljoin(util.setting("alertmanager:url"), "/api/v1/silences") response = util.get(url, params={"silenced": False}) except requests.exceptions.ConnectionError: logger.error("Error connecting to %s", url) return JsonResponse({}) else: return HttpResponse(response.content, content_type="application/json") def post(self, request): body = json.loads(request.body.decode("utf-8")) body.setdefault("comment", "Silenced from Promgen") body.setdefault("createdBy", request.user.email) form = forms.SilenceForm(body) if not form.is_valid(): return JsonResponse( { "messages": [ {"class": "alert alert-warning", "message": m, "label": k} for k in form.errors for m in form.errors[k] ] }, status=422, ) try: response = prometheus.silence(body.pop("labels"), **form.cleaned_data) except Exception as e: return JsonResponse( {"messages": [{"class": "alert alert-danger", "message": str(e)}]}, status=400, ) return HttpResponse( response.text, status=response.status_code, content_type="application/json" ) class ProxyDeleteSilence(View): def delete(self, request, silence_id): url = urljoin( util.setting("alertmanager:url"), "/api/v1/silence/%s" % silence_id ) response = util.delete(url) return HttpResponse( response.text, status=response.status_code, content_type="application/json" )
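The proxy views above all share one scatter/gather idiom: submit the same request to every shard through a thread pool, then merge the "data" arrays as each future completes. A minimal self-contained sketch of that pattern follows; `fetch` and the shard URLs are hypothetical stand-ins for util.get(...) plus result.json().

def _demo_scatter_gather():
    def fetch(shard_url):
        # stand-in for util.get(urljoin(shard_url, path), headers=...).json()
        return {"status": "success", "data": ["label_a", shard_url]}

    shard_urls = ["http://prom-a:9090", "http://prom-b:9090"]  # hypothetical shards
    merged = set()
    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
        futures = [executor.submit(fetch, url) for url in shard_urls]
        for future in concurrent.futures.as_completed(futures):
            merged.update(future.result()["data"])
    return {"status": "success", "data": sorted(merged)}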
appium/webdriver/extensions/log_event.py
salabogdan/python-client
1,383
132802
#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Dict, List, TypeVar, Union from selenium import webdriver from ..mobilecommand import MobileCommand as Command if TYPE_CHECKING: # noinspection PyUnresolvedReferences from appium.webdriver.webdriver import WebDriver T = TypeVar('T', bound=Union['WebDriver', 'LogEvent']) class LogEvent(webdriver.Remote): def get_events(self: T, type: List[str] = None) -> Dict[str, Union[str, int]]: """Retrieves events information from the current session (Since Appium 1.16.0) Args: type: The event type to filter with Usage: | events = driver.get_events() | events = driver.get_events(['appium:funEvent']) Returns: `dict`: A dictionary of events timing information containing the following entries | commands: (`list` of `dict`) List of dictionaries containing the following entries | cmd: The command name that has been sent to the appium server | startTime: Received time | endTime: Response time """ data = {} if type is not None: data['type'] = type return self.execute(Command.GET_EVENTS, data)['value'] def log_event(self: T, vendor: str, event: str) -> T: """Log a custom event on the Appium server. (Since Appium 1.16.0) Args: vendor: The vendor to log event: The event to log Usage: driver.log_event('appium', 'funEvent') Returns: Union['WebDriver', 'LogEvent']: Self instance """ data = {'vendor': vendor, 'event': event} self.execute(Command.LOG_EVENT, data) return self # pylint: disable=protected-access # noinspection PyProtectedMember def _addCommands(self) -> None: self.command_executor._commands[Command.GET_EVENTS] = ('POST', '/session/$sessionId/appium/events') self.command_executor._commands[Command.LOG_EVENT] = ('POST', '/session/$sessionId/appium/log_event')
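A hedged end-to-end sketch of the two commands above; the server URL and capabilities are placeholders and assume an Appium server >= 1.16.0 with a connected Android device, using the older desired_capabilities calling style.

def _demo_log_event():
    from appium import webdriver as appium_webdriver
    caps = {'platformName': 'Android', 'automationName': 'UiAutomator2'}  # placeholder capabilities
    driver = appium_webdriver.Remote('http://127.0.0.1:4723/wd/hub', caps)
    try:
        driver.log_event('appium', 'funEvent')           # record a custom event
        events = driver.get_events(['appium:funEvent'])  # filter by event type
        print(events.get('appium:funEvent'))
    finally:
        driver.quit()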
gitgud/skills/basics/__init__.py
osai-mirrors/git-gud
133
132803
<gh_stars>100-1000 from gitgud.util import Skill from gitgud.util.level_builder import BasicLevel skill = Skill( 'Basics', 'basics', [ BasicLevel('Introduction to Commits', 'committing', __name__), BasicLevel('Branching in Git', 'branching', __name__), BasicLevel('Merging in Git', 'merging', __name__), BasicLevel('Introduction to Rebasing', 'rebasing', __name__) ] )
resources/SE.py
mehrdad-shokri/Dr0p1t-Framework
1,345
132806
#Written by: <NAME> - D4Vinci ( Dr0p1t-Framework )
#In this script I store some SE tricks to use ;)
#Start
#Get the user's password by fooling them, then use it to run commands as that user via psexec to bypass UAC
def ask_pwd():
    while True:
        cmd = '''Powershell "$cred=$host.ui.promptforcredential('Windows firewall permission','',[Environment]::UserName,[Environment]::UserDomainName); echo $cred.getnetworkcredential().password;"'''
        response = get_output(cmd)
        if response.strip() != '' and not response.strip().startswith('[!]'):
            break
    return response.strip()
on-box-python/op-scripts/rpc_execute.py
Juniper/junosautomation
117
132841
<filename>on-box-python/op-scripts/rpc_execute.py from jnpr.junos import Device from lxml import etree with Device() as jdev: #with Device(host=<hostname>, user=<user>, password=<password>) as jdev: rsp = jdev.rpc.get_interface_information(interface_name='fxp0', terse=True) print (etree.tostring(rsp, encoding='unicode'))
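A small follow-on sketch: the RPC reply is an lxml element, so individual fields can be pulled out with XPath instead of printing the whole tree. The element names below assume the standard get-interface-information reply format.

def print_fxp0_status():
    with Device() as jdev:
        rsp = jdev.rpc.get_interface_information(interface_name='fxp0', terse=True)
        admin = rsp.findtext('.//physical-interface/admin-status', default='unknown')
        oper = rsp.findtext('.//physical-interface/oper-status', default='unknown')
        print('fxp0 admin/oper:', admin.strip(), oper.strip())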
snowfall/models/transformer.py
aarora8/snowfall
145
132850
<gh_stars>100-1000
#!/usr/bin/env python3

# Copyright (c) 2021 University of Chinese Academy of Sciences (author: <NAME>)
# Apache 2.0

import math
from typing import Dict, List, Optional, Tuple

import k2
import torch
from torch import Tensor, nn

from snowfall.common import get_texts
from snowfall.models import AcousticModel

# Note: TorchScript requires Dict/List/etc. to be fully typed.
Supervisions = Dict[str, Tensor]


class Transformer(AcousticModel):
    """
    Args:
        num_features (int): Number of input features
        num_classes (int): Number of output classes
        subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers)
        d_model (int): attention dimension
        nhead (int): number of heads
        dim_feedforward (int): feedforward dimension
        num_encoder_layers (int): number of encoder layers
        num_decoder_layers (int): number of decoder layers
        dropout (float): dropout rate
        normalize_before (bool): whether to use layer_norm before the first block.
        vgg_frontend (bool): whether to use vgg frontend.
    """

    def __init__(self, num_features: int, num_classes: int, subsampling_factor: int = 4,
                 d_model: int = 256, nhead: int = 4, dim_feedforward: int = 2048,
                 num_encoder_layers: int = 12, num_decoder_layers: int = 6,
                 dropout: float = 0.1, normalize_before: bool = True,
                 vgg_frontend: bool = False, mmi_loss: bool = True) -> None:
        super().__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.subsampling_factor = subsampling_factor
        if subsampling_factor != 4:
            raise NotImplementedError("Support only 'subsampling_factor=4'.")

        self.encoder_embed = (VggSubsampling(num_features, d_model) if vgg_frontend else
                              Conv2dSubsampling(num_features, d_model))
        self.encoder_pos = PositionalEncoding(d_model, dropout)

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
                                                normalize_before=normalize_before)
        if normalize_before:
            encoder_norm = nn.LayerNorm(d_model)
        else:
            encoder_norm = None
        self.encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        self.encoder_output_layer = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(d_model, num_classes)
        )

        if num_decoder_layers > 0:
            if mmi_loss:
                self.decoder_num_class = self.num_classes + 1  # +1 for the sos/eos symbol
            else:
                self.decoder_num_class = self.num_classes  # bpe model already has sos/eos symbol

            self.decoder_embed = nn.Embedding(self.decoder_num_class, d_model)
            self.decoder_pos = PositionalEncoding(d_model, dropout)
            decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                                    normalize_before=normalize_before)
            if normalize_before:
                decoder_norm = nn.LayerNorm(d_model)
            else:
                decoder_norm = None
            self.decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
            self.decoder_output_layer = torch.nn.Linear(d_model, self.decoder_num_class)
            self.decoder_criterion = LabelSmoothingLoss(self.decoder_num_class)
        else:
            self.decoder_criterion = None

    def forward(self, x: Tensor, supervision: Optional[Supervisions] = None) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
        """
        Args:
            x: Tensor of dimension (batch_size, num_features, input_length).
            supervision: Supervision in lhotse format, taken from batch['supervisions']

        Returns:
            Tensor: After log-softmax tensor of dimension (batch_size, number_of_classes, input_length).
            Tensor: Before linear layer tensor of dimension (input_length, batch_size, d_model).
            Optional[Tensor]: Mask tensor of dimension (batch_size, input_length) or None.
""" encoder_memory, memory_mask = self.encode(x, supervision) x = self.encoder_output(encoder_memory) return x, encoder_memory, memory_mask def encode(self, x: Tensor, supervisions: Optional[Supervisions] = None) -> Tuple[Tensor, Optional[Tensor]]: """ Args: x: Tensor of dimension (batch_size, num_features, input_length). supervisions : Supervison in lhotse format, i.e., batch['supervisions'] Returns: Tensor: Predictor tensor of dimension (input_length, batch_size, d_model). Optional[Tensor]: Mask tensor of dimension (batch_size, input_length) or None. """ x = x.permute(0, 2, 1) # (B, F, T) -> (B, T, F) x = self.encoder_embed(x) x = self.encoder_pos(x) x = x.permute(1, 0, 2) # (B, T, F) -> (T, B, F) mask = encoder_padding_mask(x.size(0), supervisions) mask = mask.to(x.device) if mask != None else None x = self.encoder(x, src_key_padding_mask=mask) # (T, B, F) return x, mask def encoder_output(self, x: Tensor) -> Tensor: """ Args: x: Tensor of dimension (input_length, batch_size, d_model). Returns: Tensor: After log-softmax tensor of dimension (batch_size, number_of_classes, input_length). """ x = self.encoder_output_layer(x).permute(1, 2, 0) # (T, B, F) ->(B, F, T) x = nn.functional.log_softmax(x, dim=1) # (B, F, T) return x def decoder_forward(self, x: Tensor, encoder_mask: Tensor, supervision: Supervisions = None, graph_compiler: object = None, token_ids: List[int] = None) -> Tensor: """ Args: x: Tensor of dimension (input_length, batch_size, d_model). encoder_mask: Mask tensor of dimension (batch_size, input_length) supervision: Supervison in lhotse format, get from batch['supervisions'] graph_compiler: use graph_compiler.L_inv (Its labels are words, while its aux_labels are phones) , graph_compiler.words and graph_compiler.oov Returns: Tensor: Decoder loss. """ if supervision is not None and graph_compiler is not None: batch_text = get_normal_transcripts(supervision, graph_compiler.lexicon.words, graph_compiler.oov) ys_in_pad, ys_out_pad = add_sos_eos(batch_text, graph_compiler.L_inv, self.decoder_num_class - 1, self.decoder_num_class - 1) elif token_ids is not None: # speical token ids: # <blank> 0 # <UNK> 1 # <sos/eos> self.decoder_num_class - 1 sos_id = self.decoder_num_class - 1 eos_id = self.decoder_num_class - 1 _sos = torch.tensor([sos_id]) _eos = torch.tensor([eos_id]) ys_in = [torch.cat([_sos, torch.tensor(y)], dim=0) for y in token_ids] ys_out = [torch.cat([torch.tensor(y), _eos], dim=0) for y in token_ids] ys_in_pad = pad_list(ys_in, eos_id) ys_out_pad = pad_list(ys_in, -1) else: raise ValueError("Invalid input for decoder self attetion") ys_in_pad = ys_in_pad.to(x.device) ys_out_pad = ys_out_pad.to(x.device) tgt_mask = generate_square_subsequent_mask(ys_in_pad.shape[-1]).to(x.device) tgt_key_padding_mask = decoder_padding_mask(ys_in_pad) tgt = self.decoder_embed(ys_in_pad) # (B, T) -> (B, T, F) tgt = self.decoder_pos(tgt) tgt = tgt.permute(1, 0, 2) # (B, T, F) -> (T, B, F) pred_pad = self.decoder(tgt=tgt, memory=x, tgt_mask=tgt_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=encoder_mask) # (T, B, F) pred_pad = pred_pad.permute(1, 0, 2) # (T, B, F) -> (B, T, F) pred_pad = self.decoder_output_layer(pred_pad) # (B, T, F) decoder_loss = self.decoder_criterion(pred_pad, ys_out_pad) return decoder_loss class TransformerEncoderLayer(nn.Module): """ Modified from torch.nn.TransformerEncoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). 
nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). normalize_before: whether to use layer_norm before the first block. Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> src = torch.rand(10, 32, 512) >>> out = encoder_layer(src) """ def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, activation: str = "relu", normalize_before: bool = True) -> None: super(TransformerEncoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerEncoderLayer, self).__setstate__(state) def forward(self, src: Tensor, src_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor: """ Pass the input through the encoder layer. Args: src: the sequence to the encoder layer (required). src_mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional). Shape: src: (S, N, E). src_mask: (S, S). src_key_padding_mask: (N, S). S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number """ residual = src if self.normalize_before: src = self.norm1(src) src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] src = residual + self.dropout1(src2) if not self.normalize_before: src = self.norm1(src) residual = src if self.normalize_before: src = self.norm2(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = residual + self.dropout2(src2) if not self.normalize_before: src = self.norm2(src) return src class TransformerDecoderLayer(nn.Module): """ Modified from torch.nn.TransformerDecoderLayer. Add support of normalize_before, i.e., use layer_norm before the first block. Args: d_model: the number of expected features in the input (required). nhead: the number of heads in the multiheadattention models (required). dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). activation: the activation function of intermediate layer, relu or gelu (default=relu). 
Examples:: >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) >>> memory = torch.rand(10, 32, 512) >>> tgt = torch.rand(20, 32, 512) >>> out = decoder_layer(tgt, memory) """ def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1, activation: str = "relu", normalize_before: bool = True) -> None: super(TransformerDecoderLayer, self).__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) self.src_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.norm3 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def __setstate__(self, state): if 'activation' not in state: state['activation'] = nn.functional.relu super(TransformerDecoderLayer, self).__setstate__(state) def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: """Pass the inputs (and mask) through the decoder layer. Args: tgt: the sequence to the decoder layer (required). memory: the sequence from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). Shape: tgt: (T, N, E). memory: (S, N, E). tgt_mask: (T, T). memory_mask: (T, S). tgt_key_padding_mask: (N, T). memory_key_padding_mask: (N, S). S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number """ residual = tgt if self.normalize_before: tgt = self.norm1(tgt) tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] tgt = residual + self.dropout1(tgt2) if not self.normalize_before: tgt = self.norm1(tgt) residual = tgt if self.normalize_before: tgt = self.norm2(tgt) tgt2 = self.src_attn(tgt, memory, memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)[0] tgt = residual + self.dropout2(tgt2) if not self.normalize_before: tgt = self.norm2(tgt) residual = tgt if self.normalize_before: tgt = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = residual + self.dropout3(tgt2) if not self.normalize_before: tgt = self.norm3(tgt) return tgt def _get_activation_fn(activation: str): if activation == "relu": return nn.functional.relu elif activation == "gelu": return nn.functional.gelu raise RuntimeError("activation should be relu/gelu, not {}".format(activation)) class Conv2dSubsampling(nn.Module): """Convolutional 2D subsampling (to 1/4 length). Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py Args: idim: Input dimension. odim: Output dimension. 
""" def __init__(self, idim: int, odim: int) -> None: """Construct a Conv2dSubsampling object.""" super(Conv2dSubsampling, self).__init__() self.conv = nn.Sequential( nn.Conv2d(in_channels=1, out_channels=odim, kernel_size=3, stride=2), nn.ReLU(), nn.Conv2d(in_channels=odim, out_channels=odim, kernel_size=3, stride=2), nn.ReLU(), ) self.out = nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim) def forward(self, x: Tensor) -> Tensor: """Subsample x. Args: x: Input tensor of dimension (batch_size, input_length, num_features). (#batch, time, idim). Returns: torch.Tensor: Subsampled tensor of dimension (batch_size, input_length, d_model). where time' = time // 4. """ x = x.unsqueeze(1) # (b, c, t, f) x = self.conv(x) b, c, t, f = x.size() x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) return x class VggSubsampling(nn.Module): """Trying to follow the setup described here https://arxiv.org/pdf/1910.09799.pdf This paper is not 100% explicit so I am guessing to some extent, and trying to compare with other VGG implementations. Args: idim: Input dimension. odim: Output dimension. """ def __init__(self, idim: int, odim: int) -> None: """Construct a VggSubsampling object. This uses 2 VGG blocks with 2 Conv2d layers each, subsampling its input by a factor of 4 in the time dimensions. Args: idim: Number of features at input, e.g. 40 or 80 for MFCC (will be treated as the image height). odim: Output dimension (number of features), e.g. 256 """ super(VggSubsampling, self).__init__() cur_channels = 1 layers = [] block_dims = [32,64] # The decision to use padding=1 for the 1st convolution, then padding=0 # for the 2nd and for the max-pooling, and ceil_mode=True, was driven by # a back-compatibility concern so that the number of frames at the # output would be equal to: # (((T-1)//2)-1)//2. # We can consider changing this by using padding=1 on the 2nd convolution, # so the num-frames at the output would be T//4. for block_dim in block_dims: layers.append(torch.nn.Conv2d(in_channels=cur_channels, out_channels=block_dim, kernel_size=3, padding=1, stride=1)) layers.append(torch.nn.ReLU()) layers.append(torch.nn.Conv2d(in_channels=block_dim, out_channels=block_dim, kernel_size=3, padding=0, stride=1)) layers.append(torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)) cur_channels = block_dim self.layers = nn.Sequential(*layers) self.out = nn.Linear(block_dims[-1] * (((idim - 1) // 2 - 1) // 2), odim) def forward(self, x: Tensor) -> Tensor: """Subsample x. Args: x: Input tensor of dimension (batch_size, input_length, num_features). (#batch, time, idim). Returns: torch.Tensor: Subsampled tensor of dimension (batch_size, input_length', d_model). where input_length' == (((input_length - 1) // 2) - 1) // 2 """ x = x.unsqueeze(1) # (b, c, t, f) x = self.layers(x) b, c, t, f = x.size() x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) return x class PositionalEncoding(nn.Module): """ Positional encoding. Args: d_model: Embedding dimension. dropout: Dropout rate. max_len: Maximum input length. 
""" def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000) -> None: """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = nn.Dropout(p=dropout) self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, max_len)) def extend_pe(self, x: Tensor) -> None: """Reset the positional encodings.""" if self.pe is not None: if self.pe.size(1) >= x.size(1): if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = torch.zeros(x.size(1), self.d_model) position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) div_term = torch.exp( torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(math.log(10000.0) / self.d_model) ) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.pe = pe.to(device=x.device, dtype=x.dtype) def forward(self, x: Tensor) -> Tensor: """ Add positional encoding. Args: x: Input tensor of dimention (batch_size, input_length, d_model). Returns: torch.Tensor: Encoded tensor of dimention (batch_size, input_length, d_model). """ self.extend_pe(x) x = x * self.xscale + self.pe[:, : x.size(1)] return self.dropout(x) class Noam(object): """ Implements Noam optimizer. Proposed in "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups model_size: attention dimension of the transformer model factor: learning rate factor warm_step: warmup steps """ def __init__(self, params, model_size: int = 256, factor: float = 10.0, warm_step: int = 25000, weight_decay=0) -> None: """Construct an Noam object.""" self.optimizer = torch.optim.Adam(params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay) self._step = 0 self.warmup = warm_step self.factor = factor self.model_size = model_size self._rate = 0 @property def param_groups(self): """Return param_groups.""" return self.optimizer.param_groups def step(self): """Update parameters and rate.""" self._step += 1 rate = self.rate() for p in self.optimizer.param_groups: p["lr"] = rate self._rate = rate self.optimizer.step() def rate(self, step=None): """Implement `lrate` above.""" if step is None: step = self._step return ( self.factor * self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)) ) def zero_grad(self): """Reset gradient.""" self.optimizer.zero_grad() def state_dict(self): """Return state_dict.""" return { "_step": self._step, "warmup": self.warmup, "factor": self.factor, "model_size": self.model_size, "_rate": self._rate, "optimizer": self.optimizer.state_dict(), } def load_state_dict(self, state_dict): """Load state_dict.""" for key, value in state_dict.items(): if key == "optimizer": self.optimizer.load_state_dict(state_dict["optimizer"]) else: setattr(self, key, value) class LabelSmoothingLoss(nn.Module): """ Label-smoothing loss. KL-divergence between q_{smoothed ground truth prob.}(w) and p_{prob. computed by model}(w) is minimized. 
    Modified from
    https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/label_smoothing_loss.py

    Args:
        size: the number of classes
        padding_idx: ignored class id
        smoothing: smoothing rate (0.0 means the conventional CE)
        normalize_length: normalize loss by sequence length if True
        criterion: loss function to be smoothed
    """

    def __init__(
        self,
        size: int,
        padding_idx: int = -1,
        smoothing: float = 0.1,
        normalize_length: bool = False,
        criterion: nn.Module = nn.KLDivLoss(reduction="none"),
    ) -> None:
        """Construct a LabelSmoothingLoss object."""
        super(LabelSmoothingLoss, self).__init__()
        self.criterion = criterion
        self.padding_idx = padding_idx
        assert 0.0 < smoothing <= 1.0
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.size = size
        self.true_dist = None
        self.normalize_length = normalize_length

    def forward(self, x: Tensor, target: Tensor) -> Tensor:
        """
        Compute loss between x and target.

        Args:
            x: prediction of dimension (batch_size, input_length, number_of_classes).
            target: target masked with self.padding_idx of dimension (batch_size, input_length).

        Returns:
            torch.Tensor: scalar float value
        """
        assert x.size(2) == self.size
        batch_size = x.size(0)
        x = x.view(-1, self.size)
        target = target.view(-1)
        with torch.no_grad():
            true_dist = x.clone()
            true_dist.fill_(self.smoothing / (self.size - 1))
            ignore = target == self.padding_idx  # (B,)
            total = len(target) - ignore.sum().item()
            target = target.masked_fill(ignore, 0)  # avoid -1 index
            true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
        kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
        denom = total if self.normalize_length else batch_size
        return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom


def encoder_padding_mask(max_len: int, supervisions: Optional[Supervisions] = None) -> Optional[Tensor]:
    """Make mask tensor containing indices of padded part.

    Args:
        max_len: maximum length of input features
        supervisions: Supervision in lhotse format, i.e., batch['supervisions']

    Returns:
        Tensor: Mask tensor of dimension (batch_size, input_length), True denotes the masked indices.
    """
    if supervisions is None:
        return None

    supervision_segments = torch.stack(
        (supervisions['sequence_idx'],
         supervisions['start_frame'],
         supervisions['num_frames']), 1).to(torch.int32)

    lengths = [0 for _ in range(int(supervision_segments[:, 0].max().item()) + 1)]
    for idx in range(supervision_segments.size(0)):
        # Note: TorchScript doesn't allow to unpack tensors as tuples
        sequence_idx = supervision_segments[idx, 0].item()
        start_frame = supervision_segments[idx, 1].item()
        num_frames = supervision_segments[idx, 2].item()
        lengths[sequence_idx] = start_frame + num_frames

    lengths = [((i - 1) // 2 - 1) // 2 for i in lengths]
    bs = int(len(lengths))
    seq_range = torch.arange(0, max_len, dtype=torch.int64)
    seq_range_expand = seq_range.unsqueeze(0).expand(bs, max_len)
    # Note: TorchScript doesn't implement Tensor.new()
    seq_length_expand = torch.tensor(
        lengths, device=seq_range_expand.device, dtype=seq_range_expand.dtype
    ).unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand

    return mask


def decoder_padding_mask(ys_pad: Tensor, ignore_id: int = -1) -> Tensor:
    """Generate a length mask for input. The masked positions are filled with
    bool(True). Unmasked positions are filled with bool(False).

    Args:
        ys_pad: padded tensor of dimension (batch_size, input_length).
        ignore_id: the ignored number (the padding number) in ys_pad

    Returns:
        Tensor: a mask tensor of dimension (batch_size, input_length).
""" ys_mask = ys_pad == ignore_id return ys_mask def get_normal_transcripts(supervision: Supervisions, words: k2.SymbolTable, oov: str = '<UNK>') -> List[List[int]]: """Get normal transcripts (1 input recording has 1 transcript) from lhotse cut format. Achieved by concatenate the transcripts corresponding to the same recording. Args: supervision : Supervison in lhotse format, i.e., batch['supervisions'] words: The word symbol table. oov: Out of vocabulary word. Returns: List[List[int]]: List of concatenated transcripts, length is batch_size """ texts = [[token if token in words else oov for token in text.split(' ')] for text in supervision['text']] texts_ids = [[words[token] for token in text] for text in texts] batch_text = [[] for _ in range(int(supervision['sequence_idx'].max().item()) + 1)] for sequence_idx, text in zip(supervision['sequence_idx'], texts_ids): batch_text[sequence_idx] = batch_text[sequence_idx] + text return batch_text def generate_square_subsequent_mask(sz: int) -> Tensor: """Generate a square mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0). Args: sz: mask size Returns: Tensor: a square mask of dimension (sz, sz) """ return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1) def add_sos_eos(ys: List[List[int]], lexicon: k2.Fsa, sos: int, eos: int, ignore_id: int = -1) -> Tuple[Tensor, Tensor]: """Add <sos> and <eos> labels. Args: ys: batch of unpadded target sequences lexicon: Its labels are words, while its aux_labels are phones. sos: index of <sos> eos: index of <eos> ignore_id: index of padding Returns: Tensor: Input of transformer decoder. Padded tensor of dimention (batch_size, max_length). Tensor: Output of transformer decoder. padded tensor of dimention (batch_size, max_length). """ _sos = torch.tensor([sos]) _eos = torch.tensor([eos]) ys = get_hierarchical_targets(ys, lexicon) ys_in = [torch.cat([_sos, y], dim=0) for y in ys] ys_out = [torch.cat([y, _eos], dim=0) for y in ys] return pad_list(ys_in, eos), pad_list(ys_out, ignore_id) def pad_list(ys: List[Tensor], pad_value: float) -> Tensor: """Perform padding for the list of tensors. Args: ys: List of tensors. len(ys) = batch_size. pad_value: Value for padding. Returns: Tensor: Padded tensor (batch_size, max_length, `*`). Examples: >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)] >>> x [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])] >>> pad_list(x, 0) tensor([[1., 1., 1., 1.], [1., 1., 0., 0.], [1., 0., 0., 0.]]) """ n_batch = len(ys) max_len = max(x.size(0) for x in ys) pad = ys[0].new_full((n_batch, max_len, *ys[0].size()[1:]), pad_value) for i in range(n_batch): pad[i, : ys[i].size(0)] = ys[i] return pad def get_hierarchical_targets(ys: List[List[int]], lexicon: k2.Fsa) -> List[Tensor]: """Get hierarchical transcripts (i.e., phone level transcripts) from transcripts (i.e., word level transcripts). Args: ys: Word level transcripts. lexicon: Its labels are words, while its aux_labels are phones. Returns: List[Tensor]: Phone level transcripts. 
""" if lexicon is None: return ys else: L_inv = lexicon n_batch = len(ys) indices = torch.tensor(range(n_batch)) device = L_inv.device transcripts = k2.create_fsa_vec([k2.linear_fsa(x, device=device) for x in ys]) transcripts_with_self_loops = k2.add_epsilon_self_loops(transcripts) transcripts_lexicon = k2.intersect( L_inv, transcripts_with_self_loops, treat_epsilons_specially=False) # Don't call invert_() above because we want to return phone IDs, # which is the `aux_labels` of transcripts_lexicon transcripts_lexicon = k2.remove_epsilon(transcripts_lexicon) transcripts_lexicon = k2.top_sort(transcripts_lexicon) transcripts_lexicon = k2.shortest_path(transcripts_lexicon, use_double_scores=True) ys = get_texts(transcripts_lexicon, indices) ys = [torch.tensor(y) for y in ys] return ys def test_transformer(): t = Transformer(40, 1281) T = 200 f = torch.rand(31, 40, T) g, _, _ = t(f) assert g.shape == (31, 1281, (((T-1)//2)-1)//2) def main(): test_transformer() if __name__ == '__main__': main()
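A small worked sketch (appended for illustration) of the schedule implemented by Noam.rate() above: lr = factor * d_model**-0.5 * min(step**-0.5, step * warmup**-1.5), i.e. a linear warm-up followed by an inverse-square-root decay. The helper name is ours; the defaults mirror the Noam class.

def noam_lr(step, model_size=256, factor=10.0, warm_step=25000):
    # mirrors Noam.rate(); the peak lr is reached at step == warm_step,
    # e.g. noam_lr(25000) ~= 3.95e-3 with these defaults
    return factor * model_size ** (-0.5) * min(step ** (-0.5), step * warm_step ** (-1.5))

def demo_noam_schedule():
    for step in (1, 1000, 25000, 100000):
        print(step, round(noam_lr(step), 6))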
challenges/3.9.True_Operator/main.py
pradeepsaiu/python-coding-challenges
141
132853
<gh_stars>100-1000
def boolean_true():
    return value  # Change the variable named value to the correct answer


print(boolean_true())
test/regression/daily/ledger_lte.py
WDeepali/blockchaindemo
172
132865
<reponame>WDeepali/blockchaindemo # Copyright IBM Corp. All Rights Reserved. # # SPDX-License-Identifier: Apache-2.0 # import unittest import subprocess tool_directory = '../../tools/LTE/scripts' class perf_goleveldb(unittest.TestCase): def test_FAB_3790_VaryNumParallelTxPerChain(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the number of parallel transactions per chain and observe the performance. Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryNumParallelTxPerChain.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyNumParallelTxPerChain", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="VaryNumParallelTxPerChain " "performance test failed. \nPlease check the logfile " +logfile.name+" for more details.") def test_FAB_3795_VaryNumChains(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the number of chains (ledgers). Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryNumChains.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyNumChains", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="VaryNumChains performance test" " failed. \nPlease check the logfile "+logfile.name+" for more " "details.") def test_FAB_3798_VaryNumParallelTxWithSingleChain(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the number of parallel transactions on a single chain. Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryNumParallelTxWithSingleChain.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyNumParallelTxWithSingleChain", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="VaryNumParallelTxWithSingleChain " "performance test failed. \nPlease check the logfile " +logfile.name+" for more details.") def test_FAB_3799_VaryNumChainsWithNoParallelism(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the number of chains without any parallelism within a single chain. Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryNumChainsWithNoParallelism.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyNumChainsWithNoParallelism", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="varyNumChainsWithNoParallelism " "performance test failed. 
\nPlease check the logfile " +logfile.name+" for more details.") def test_FAB_3801_VaryKVSize(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the size of key-value. Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryKVSize.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyKVSize", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="varyKVSize performance test" " failed. \nPlease check the logfile "+logfile.name+" for more " "details.") def test_FAB_3802_VaryBatchSize(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the value of the batch size Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryBatchSize.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyBatchSize", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="varyBatchSize performance test" " failed. \nPlease check the logfile "+logfile.name+" for more " "details.") def test_FAB_3800_VaryNumKeysInEachTx(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the number of keys in each transaction. Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryNumKeysInEachTx.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyNumKeysInEachTx", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="varyNumKeysInEachTx performance " "test failed. \nPlease check the logfile "+logfile.name +" for more details.") def test_FAB_3803_VaryNumTxs(self): ''' In this Performance test, we observe the performance (time to complete a set number of Ledger operations) of the Ledger component, with goleveldb as the state database. We vary the number of transactions carried out. Passing criteria: Underlying LTE test completed successfully with exit code 0 ''' logfile = open("output_VaryNumTxs.log", "w") returncode = subprocess.call( "./runbenchmarks.sh -f parameters_daily_CI.sh varyNumTxs", shell=True, stderr=subprocess.STDOUT, stdout=logfile, cwd=tool_directory) logfile.close() self.assertEqual(returncode, 0, msg="varyNumTxs performance test" " failed. \nPlease check the logfile "+logfile.name+" for more " "details.") class perf_couchdb(unittest.TestCase): @unittest.skip("WIP, skipping") def test_FAB_3870_VaryNumParallelTxPerChain(self): ''' In this Performance test, we observe the performance (operations per second) of the Ledger component, with CouchDB as the state database, as we vary the number of parallel transactions per chain. ''' self.assertTrue(True) @unittest.skip("WIP, skipping") def test_FAB_3871_VaryNumChain(self): ''' In this Performance test, we observe the performance (operations per second) of the Ledger component, with CouchDB as the state database, as we vary the number of chains (ledgers). 
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3872_VaryNumParallelTxWithSingleChain(self):
        '''
        In this Performance test, we observe the performance (operations
        per second) of the Ledger component, with CouchDB as the state
        database, as we vary the number of parallel transactions on a
        single chain.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3873_VaryNumChainWithNoParallelism(self):
        '''
        In this Performance test, we observe the performance (operations
        per second) of the Ledger component, with CouchDB as the state
        database, as we vary the number of chains without any parallelism
        within a single chain.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3874_VaryKVSize(self):
        '''
        In this Performance test, we observe the performance (operations
        per second) of the Ledger component, with CouchDB as the state
        database, as we vary the size of key-value.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3875_VaryBatchSize(self):
        '''
        In this Performance test, we observe the performance (operations
        per second) of the Ledger component, with CouchDB as the state
        database, as we vary the value of the batch size.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3876_VaryNumKeysInEachTX(self):
        '''
        In this Performance test, we observe the performance (operations
        per second) of the Ledger component, with CouchDB as the state
        database, as we vary the number of keys in each transaction.
        '''
        self.assertTrue(True)

    @unittest.skip("WIP, skipping")
    def test_FAB_3877_VaryNumTxs(self):
        '''
        In this Performance test, we observe the performance (operations
        per second) of the Ledger component, with CouchDB as the state
        database, as we vary the number of transactions carried out.
        '''
        self.assertTrue(True)
tools/tcam-capture/tcam_capture/FPSCounter.py
lkucalaba/tiscamera
241
132870
<gh_stars>100-1000
# Copyright 2020 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import deque
import time
import threading
import logging

log = logging.getLogger(__name__)


class TaskThread(threading.Thread):
    """
    Thread that executes a task every N seconds
    """

    def __init__(self, func, pars=None, interval=15.0):
        """
        """
        threading.Thread.__init__(self)
        self._finished = threading.Event()
        self.interval = interval
        self.user_func = func
        self.user_params = pars

    def set_interval(self, interval):
        """
        Set the number of seconds we sleep between executing our task
        """
        self.interval = interval

    def shutdown(self):
        """
        Stop this thread
        """
        self._finished.set()

    def start(self):
        """
        """
        self._finished.clear()
        super().start()

    def run(self):
        """
        Implicitly called by executing start()
        executes self.user_func every self.interval
        until self.shutdown() is called
        """
        while not self._finished.is_set():
            self.task()
            # sleep for interval or until shutdown
            self._finished.wait(self.interval)

    def task(self):
        """
        The task done by this thread - override in subclasses
        """
        if self.user_params:
            self.user_func(self.user_params)
        else:
            self.user_func()


class FPSCounter(object):
    """
    This class is intended for fps measurements.
    Uses a delta from the frames of the last n seconds to calculate fps.
    Also offers an average fps from start to now.
    """

    def __init__(self):
        """
        The length of the rolling fps window is controlled by the
        `seconds_max` attribute (default: 5 seconds).
        """
        self._running = False
        self._start_time = None
        self.framecounter = 0
        self.frames_in_last_second = 0
        self._actual_fps = 0.0
        self._delta_queue = deque()
        self.seconds_max = 5
        self._lock = threading.Lock()
        self._tick_thread = TaskThread(self.__update_values, None, 1.0)

    def start(self):
        """
        Start fps measurements
        """
        if self._running:
            raise RuntimeError("Already running")
        self._start_time = time.time()
        self.framecounter = 0
        self.frames_in_last_second = 0
        self._tick_thread._finished.clear()
        self._tick_thread.start()
        self._running = True

    def stop(self):
        """
        """
        self._tick_thread.shutdown()
        if self._running:
            self._running = False
            self._tick_thread.join()

    def tick(self):
        """
        Add frame
        """
        self.framecounter += 1
        with self._lock:
            self.frames_in_last_second += 1

    def __update_values(self):
        """
        will be called in a separate thread
        to keep fps values etc up to date.
        """
        frame_count = 0
        seconds = len(self._delta_queue)
        with self._lock:
            self._delta_queue.append(self.frames_in_last_second)
            self.frames_in_last_second = 0

        # we only want n entries.
get rid of the rest
        while len(self._delta_queue) > self.seconds_max:
            self._delta_queue.popleft()
        seconds = len(self._delta_queue)

        for i in range(len(self._delta_queue)):
            frame_count += self._delta_queue[i]

        if seconds == 0:
            self._actual_fps = 0.0
        else:
            self._actual_fps = frame_count / seconds

    def get_fps(self):
        """
        return fps from the last n seconds
        """
        return self._actual_fps

    def get_avg_fps(self):
        """
        returns float containing the avg fps since start has been called
        """
        if not self._start_time:  # covers None (never started) as well as 0
            self._start_time = time.time()
            return 0.0
        else:
            diff = int(time.time() - self._start_time)
            if diff == 0:
                return 0.0
            actual_fps = self.framecounter / diff
        return actual_fps
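A hedged usage sketch, driving the counter from a simulated frame loop; in tcam-capture the ticks would come from the new-frame callback instead, and the sleeps are only there to keep the demo short.

if __name__ == '__main__':
    counter = FPSCounter()
    counter.start()
    for _ in range(60):
        counter.tick()
        time.sleep(0.03)      # ~33 fps simulated frame interval
    time.sleep(1.1)           # allow the 1 s TaskThread tick to refresh values
    print('rolling fps:', counter.get_fps())
    print('average fps:', counter.get_avg_fps())
    counter.stop()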
meta_policy_search/optimizers/conjugate_gradient_optimizer.py
Zhiwei-Z/prompzzw
210
132904
from meta_policy_search.utils import logger

import numpy as np
import tensorflow as tf
from collections import OrderedDict

from meta_policy_search.optimizers.base import Optimizer


class FiniteDifferenceHvp(Optimizer):
    def __init__(self, base_eps=1e-5, symmetric=True, grad_clip=None):
        self.base_eps = np.cast['float32'](base_eps)
        self.symmetric = symmetric
        self.grad_clip = grad_clip
        self._target = None
        self.reg_coeff = None
        self._constraint_gradient = None
        self._input_ph_dict = None

    def build_graph(self, constraint_obj, target, input_val_dict, reg_coeff):
        """
        Sets the objective function and target weights for the optimize function

        Args:
            constraint_obj (tf_op) : constraint objective
            target (Policy) : Policy whose values we are optimizing over
            input_val_dict (dict) : dict of tf.placeholders for input data which may be subsampled.
                The first dimension corresponds to the number of data points
            reg_coeff (float): regularization coefficient
        """
        self._target = target
        self.reg_coeff = reg_coeff
        self._input_ph_dict = input_val_dict

        params = list(target.get_params().values())
        constraint_grads = tf.gradients(constraint_obj, xs=params)
        for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
            if grad is None:
                constraint_grads[idx] = tf.zeros_like(param)

        constraint_gradient = tf.concat([tf.reshape(grad, [-1]) for grad in constraint_grads], axis=0)
        self._constraint_gradient = constraint_gradient

    def constraint_gradient(self, input_val_dict):
        """
        Computes the gradient of the constraint objective

        Args:
            input_val_dict (dict): inputs needed to compute the gradient

        Returns:
            (np.ndarray): flattened gradient
        """
        sess = tf.get_default_session()
        feed_dict = self.create_feed_dict(input_val_dict)
        constraint_gradient = sess.run(self._constraint_gradient, feed_dict)
        return constraint_gradient

    def Hx(self, input_val_dict, x):
        """
        Compute the second derivative of the constraint val in the direction of the vector x

        Args:
            input_val_dict (dict): inputs needed to compute the gradient of the constraint objective
            x (np.ndarray): vector indicating the direction on which the Hessian has to be computed

        Returns:
            (np.ndarray): second derivative in the direction of x
        """
        assert isinstance(x, np.ndarray)
        param_vals = self._target.get_param_values().copy()
        flat_param_vals = _flatten_params(param_vals)
        eps = self.base_eps

        params_plus_eps_vals = _unflatten_params(flat_param_vals + eps * x, params_example=param_vals)
        self._target.set_params(params_plus_eps_vals)
        constraint_grad_plus_eps = self.constraint_gradient(input_val_dict)
        self._target.set_params(param_vals)

        if self.symmetric:
            params_minus_eps_vals = _unflatten_params(flat_param_vals - eps * x, params_example=param_vals)
            self._target.set_params(params_minus_eps_vals)
            constraint_grad_minus_eps = self.constraint_gradient(input_val_dict)
            self._target.set_params(param_vals)
            hx = (constraint_grad_plus_eps - constraint_grad_minus_eps) / (2 * eps)
        else:
            constraint_grad = self.constraint_gradient(input_val_dict)
            hx = (constraint_grad_plus_eps - constraint_grad) / eps

        return hx

    def build_eval(self, inputs):
        """
        Build the Hessian evaluation function. It lets you evaluate the Hessian
        of the constraint objective in any direction.
Args: inputs (list): inputs needed to compute the gradient of the constraint objective Returns: (function): function that evaluates the Hessian of the constraint objective in the input direction """ def evaluate_hessian(x): return self.Hx(inputs, x) + self.reg_coeff * x return evaluate_hessian class ConjugateGradientOptimizer(Optimizer): """ Performs constrained optimization via line search. The search direction is computed using a conjugate gradient algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient of the loss function. Args: cg_iters (int) : The number of conjugate gradients iterations used to calculate A^-1 g reg_coeff (float) : A small value so that A -> A + reg*I subsample_factor (float) : Subsampling factor to reduce samples when using "conjugate gradient. Since the computation time for the descent direction dominates, this can greatly reduce the overall computation time. backtrack_ratio (float) : ratio for decreasing the step size for the line search max_backtracks (int) : maximum number of backtracking iterations for the line search debug_nan (bool) : if set to True, NanGuard will be added to the compilation, and ipdb will be invoked when nan is detected accept_violation (bool) : whether to accept the descent step if it violates the line search condition after exhausting all backtracking budgets hvp_approach (obj) : Hessian vector product approach """ def __init__( self, cg_iters=10, reg_coeff=0, subsample_factor=1., backtrack_ratio=0.8, max_backtracks=15, debug_nan=False, accept_violation=False, hvp_approach=FiniteDifferenceHvp(), ): self._cg_iters = cg_iters self._reg_coeff = reg_coeff self._subsample_factor = subsample_factor self._backtrack_ratio = backtrack_ratio self._max_backtracks = max_backtracks self._target = None self._max_constraint_val = None self._constraint_name = "kl-div" self._debug_nan = debug_nan self._accept_violation = accept_violation self._hvp_approach = hvp_approach self._loss = None self._gradient = None self._constraint_objective = None self._input_ph_dict = None def build_graph(self, loss, target, input_ph_dict, leq_constraint): """ Sets the objective function and target weights for the optimize function Args: loss (tf_op) : minimization objective target (Policy) : Policy whose values we are optimizing over inputs (list) : tuple of tf.placeholders for input data which may be subsampled. The first dimension corresponds to the number of data points extra_inputs (list) : tuple of tf.placeholders for hyperparameters (e.g. learning rate, if annealed) leq_constraint (tuple) : A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon. 
""" assert isinstance(loss, tf.Tensor) assert hasattr(target, 'get_params') assert isinstance(input_ph_dict, dict) constraint_objective, constraint_value = leq_constraint self._target = target self._constraint_objective = constraint_objective self._max_constraint_val = constraint_value self._input_ph_dict = input_ph_dict self._loss = loss # build the graph of the hessian vector product (hvp) self._hvp_approach.build_graph(constraint_objective, target, self._input_ph_dict, self._reg_coeff) # build the graph of the gradients params = list(target.get_params().values()) grads = tf.gradients(loss, xs=params) for idx, (grad, param) in enumerate(zip(grads, params)): if grad is None: grads[idx] = tf.zeros_like(param) gradient = tf.concat([tf.reshape(grad, [-1]) for grad in grads], axis=0) self._gradient = gradient def loss(self, input_val_dict): """ Computes the value of the loss for given inputs Args: inputs (list): inputs needed to compute the loss function extra_inputs (list): additional inputs needed to compute the loss function Returns: (float): value of the loss """ sess = tf.get_default_session() feed_dict = self.create_feed_dict(input_val_dict) loss = sess.run(self._loss, feed_dict=feed_dict) return loss def constraint_val(self, input_val_dict): """ Computes the value of the KL-divergence between pre-update policies for given inputs Args: inputs (list): inputs needed to compute the inner KL extra_inputs (list): additional inputs needed to compute the inner KL Returns: (float): value of the loss """ sess = tf.get_default_session() feed_dict = self.create_feed_dict(input_val_dict) constrain_val = sess.run(self._constraint_objective, feed_dict) return constrain_val def gradient(self, input_val_dict): """ Computes the gradient of the loss function Args: inputs (list): inputs needed to compute the gradient extra_inputs (list): additional inputs needed to compute the loss function Returns: (np.ndarray): flattened gradient """ sess = tf.get_default_session() feed_dict = self.create_feed_dict(input_val_dict) gradient = sess.run(self._gradient, feed_dict) return gradient def optimize(self, input_val_dict): """ Carries out the optimization step Args: inputs (list): inputs for the optimization extra_inputs (list): extra inputs for the optimization subsample_grouped_inputs (None or list): subsample data from each element of the list """ logger.log("Start CG optimization") logger.log("computing loss before") loss_before = self.loss(input_val_dict) logger.log("performing update") logger.log("computing gradient") gradient = self.gradient(input_val_dict) logger.log("gradient computed") logger.log("computing descent direction") Hx = self._hvp_approach.build_eval(input_val_dict) descent_direction = conjugate_gradients(Hx, gradient, cg_iters=self._cg_iters) initial_step_size = np.sqrt(2.0 * self._max_constraint_val * (1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8))) if np.isnan(initial_step_size): logger.log("Initial step size is NaN! 
Rejecting the step!") return initial_descent_step = initial_step_size * descent_direction logger.log("descent direction computed") prev_params = self._target.get_param_values() prev_params_values = _flatten_params(prev_params) loss, constraint_val, n_iter, violated = 0, 0, 0, False for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)): cur_step = ratio * initial_descent_step cur_params_values = prev_params_values - cur_step cur_params = _unflatten_params(cur_params_values, params_example=prev_params) self._target.set_params(cur_params) loss, constraint_val = self.loss(input_val_dict), self.constraint_val(input_val_dict) if loss < loss_before and constraint_val <= self._max_constraint_val: break """ ------------------- Logging Stuff -------------------------- """ if np.isnan(loss): violated = True logger.log("Line search violated because loss is NaN") if np.isnan(constraint_val): violated = True logger.log("Line search violated because constraint %s is NaN" % self._constraint_name) if loss >= loss_before: violated = True logger.log("Line search violated because loss not improving") if constraint_val >= self._max_constraint_val: violated = True logger.log("Line search violated because constraint %s is violated" % self._constraint_name) if violated and not self._accept_violation: logger.log("Line search condition violated. Rejecting the step!") self._target.set_params(prev_params) logger.log("backtrack iters: %d" % n_iter) logger.log("computing loss after") logger.log("optimization finished") def _unflatten_params(flat_params, params_example): unflat_params = [] idx = 0 for key, param in params_example.items(): size_param = np.prod(param.shape) reshaped_param = np.reshape(flat_params[idx:idx+size_param], newshape=param.shape) unflat_params.append((key, reshaped_param)) idx += size_param return OrderedDict(unflat_params) def _flatten_params(params): return np.concatenate([param.reshape(-1) for param in params.values()]) def conjugate_gradients(f_Ax, b, cg_iters=10, verbose=False, residual_tol=1e-10): """ Demmel p 312 """ p = b.copy() r = b.copy() x = np.zeros_like(b, dtype=np.float32) rdotr = r.dot(r) fmtstr = "%10i %10.3g %10.3g" titlestr = "%10s %10s %10s" if verbose: print(titlestr % ("iter", "residual norm", "soln norm")) for i in range(cg_iters): if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x))) z = f_Ax(p) v = rdotr / p.dot(z) x += v * p r -= v * z newrdotr = r.dot(r) mu = newrdotr / rdotr p = r + mu * p rdotr = newrdotr if rdotr < residual_tol: break if verbose: print(fmtstr % (i + 1, rdotr, np.linalg.norm(x))) return x
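
# Usage sketch (not part of the original file): a quick numerical check that
# conjugate_gradients solves A x = b for a small symmetric positive-definite A.
# The matrix, seed and tolerance are illustrative values for this demo only.
def _demo_conjugate_gradients():
    rng = np.random.RandomState(0)
    m = rng.randn(5, 5).astype(np.float32)
    a = m @ m.T + 5 * np.eye(5, dtype=np.float32)  # SPD by construction
    b = rng.randn(5).astype(np.float32)
    x = conjugate_gradients(lambda v: a.dot(v), b, cg_iters=25)
    assert np.allclose(a.dot(x), b, atol=1e-3)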
build/android/pylib/perf/thermal_throttle.py
nagineni/chromium-crosswalk
231
132911
<reponame>nagineni/chromium-crosswalk # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging class OmapThrottlingDetector(object): """Class to detect and track thermal throttling on an OMAP 4.""" OMAP_TEMP_FILE = ('/sys/devices/platform/omap/omap_temp_sensor.0/' 'temperature') @staticmethod def IsSupported(adb): return adb.FileExistsOnDevice(OmapThrottlingDetector.OMAP_TEMP_FILE) def __init__(self, adb): self._adb = adb def BecameThrottled(self, log_line): return 'omap_thermal_throttle' in log_line def BecameUnthrottled(self, log_line): return 'omap_thermal_unthrottle' in log_line def GetThrottlingTemperature(self, log_line): if 'throttle_delayed_work_fn' in log_line: return float([s for s in log_line.split() if s.isdigit()][0]) / 1000.0 def GetCurrentTemperature(self): tempdata = self._adb.GetFileContents(OmapThrottlingDetector.OMAP_TEMP_FILE) return float(tempdata[0]) / 1000.0 class ExynosThrottlingDetector(object): """Class to detect and track thermal throttling on an Exynos 5.""" @staticmethod def IsSupported(adb): return adb.FileExistsOnDevice('/sys/bus/exynos5-core') def __init__(self, adb): pass def BecameThrottled(self, log_line): return 'exynos_tmu: Throttling interrupt' in log_line def BecameUnthrottled(self, log_line): return 'exynos_thermal_unthrottle: not throttling' in log_line def GetThrottlingTemperature(self, log_line): return None def GetCurrentTemperature(self): return None class ThermalThrottle(object): """Class to detect and track thermal throttling. Usage: Wait for IsThrottled() to be False before running test After running test call HasBeenThrottled() to find out if the test run was affected by thermal throttling. """ def __init__(self, adb): self._adb = adb self._throttled = False self._detector = None if OmapThrottlingDetector.IsSupported(adb): self._detector = OmapThrottlingDetector(adb) elif ExynosThrottlingDetector.IsSupported(adb): self._detector = ExynosThrottlingDetector(adb) def HasBeenThrottled(self): """True if there has been any throttling since the last call to HasBeenThrottled or IsThrottled. """ return self._ReadLog() def IsThrottled(self): """True if currently throttled.""" self._ReadLog() return self._throttled def _ReadLog(self): if not self._detector: return False has_been_throttled = False serial_number = self._adb.Adb().GetSerialNumber() log = self._adb.RunShellCommand('dmesg -c') degree_symbol = unichr(0x00B0) for line in log: if self._detector.BecameThrottled(line): if not self._throttled: logging.warning('>>> Device %s thermally throttled', serial_number) self._throttled = True has_been_throttled = True elif self._detector.BecameUnthrottled(line): if self._throttled: logging.warning('>>> Device %s thermally unthrottled', serial_number) self._throttled = False has_been_throttled = True temperature = self._detector.GetThrottlingTemperature(line) if temperature is not None: logging.info(u'Device %s thermally throttled at %3.1f%sC', serial_number, temperature, degree_symbol) if logging.getLogger().isEnabledFor(logging.DEBUG): # Print current temperature of CPU SoC. 
temperature = self._detector.GetCurrentTemperature() if temperature is not None: logging.debug(u'Current SoC temperature of %s = %3.1f%sC', serial_number, temperature, degree_symbol) # Print temperature of battery, to give a system temperature dumpsys_log = self._adb.RunShellCommand('dumpsys battery') for line in dumpsys_log: if 'temperature' in line: btemp = float([s for s in line.split() if s.isdigit()][0]) / 10.0 logging.debug(u'Current battery temperature of %s = %3.1f%sC', serial_number, btemp, degree_symbol) return has_been_throttled
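
# Usage sketch (not part of the original file): `device_adb` stands for a
# hypothetical adb wrapper exposing the interface this module expects
# (FileExistsOnDevice, GetFileContents, RunShellCommand, Adb().GetSerialNumber).
#
#   throttle = ThermalThrottle(device_adb)
#   while throttle.IsThrottled():      # wait until the device cools down
#     time.sleep(5)
#   run_perf_test()                    # hypothetical test entry point
#   if throttle.HasBeenThrottled():
#     logging.warning('Results may be invalid: device throttled during the run.')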
rollbar/examples/starlette/app_logger.py
jackton1/pyrollbar
177
132928
<gh_stars>100-1000
#!/usr/bin/env python

# This example uses the Uvicorn package, which must be installed. However, it
# can be replaced with any other ASGI-compliant server.
#
# NOTE: Python 3.6 requires the aiocontextvars package to be installed.
# Optional asynchronous reporting requires the HTTPX package to be installed.
#
# Run: python app_logger.py

import logging

import rollbar
import uvicorn
from rollbar.contrib.starlette import LoggerMiddleware
from rollbar.logger import RollbarHandler
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse

# Initialize the Rollbar SDK with your server-side ACCESS_TOKEN
rollbar.init(
    'ACCESS_TOKEN',
    environment='staging',
    handler='async',  # For asynchronous reporting use: default, async or httpx
)

# Set the module-level logger to emit DEBUG and above
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Report ERROR and above to Rollbar
rollbar_handler = RollbarHandler()
rollbar_handler.setLevel(logging.ERROR)

# Attach the Rollbar handler to the logger
logger.addHandler(rollbar_handler)

# Integrate Rollbar with the Starlette application
app = Starlette()
app.add_middleware(LoggerMiddleware)  # should be added as the last middleware

# GET query params will be sent to Rollbar and available in the UI
# $ curl http://localhost:8888?param1=hello&param2=world
@app.route('/')
async def root(request):
    # Reported log entries (at or above the handler's ERROR threshold)
    logger.critical('Critical message sent to Rollbar')
    logger.error('Error message sent to Rollbar')

    # Ignored log entries (below the handler's ERROR threshold)
    logger.warning('Warning message is not sent to Rollbar')
    logger.info('Info message is not sent to Rollbar')
    logger.debug('Debug message is not sent to Rollbar')

    return PlainTextResponse('hello world')


if __name__ == '__main__':
    uvicorn.run(app, host='localhost', port=8888)
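
# Minimal sketch (not part of the original example): the filtering above relies
# on standard logging semantics -- the logger level gates what gets emitted,
# and each handler's level gates what that handler forwards. The same pattern
# with a plain stdlib handler:
#
#   import logging
#   log = logging.getLogger('demo')
#   log.setLevel(logging.DEBUG)        # emit DEBUG and above
#   handler = logging.StreamHandler()
#   handler.setLevel(logging.ERROR)    # this handler only forwards ERROR+
#   log.addHandler(handler)
#   log.error('forwarded')             # reaches the handler
#   log.info('suppressed')             # dropped at the handler level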
tools/reserialize.py
amaajemyfren/data
435
132937
import argparse import logging import json import sys sys.path.insert(0, '.') from tools.constants import JSON_FORMAT_KWARGS from tools.utils import get_json_files def reserialize(file_): """Reformat json file""" with open(file_) as fp: try: data = json.load(fp) except ValueError: logging.error('Json syntax error in file {}'.format(file_)) raise with open(file_, 'w') as fp: json.dump(data, fp, **JSON_FORMAT_KWARGS) fp.write("\n") def main(): """Convert json file(s) to the project format standards""" logging.basicConfig(level=logging.WARNING) parser = argparse.ArgumentParser() parser.add_argument("path", help="path to file(s) to reserialize") parser.add_argument("-a", "--all", action="store_true", help="reserialize all JSON files under path") args = parser.parse_args() if args.all: category_paths, video_paths = get_json_files(args.path) paths = category_paths + video_paths for path in paths: reserialize(path) else: reserialize(args.path) if __name__ == '__main__': main()
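
# Illustrative sketch (not part of the original file): JSON_FORMAT_KWARGS comes
# from tools.constants and its exact contents are not shown here. A plausible
# normalisation config -- assumed values, purely for illustration -- would be:
#
#   JSON_FORMAT_KWARGS = {
#       'indent': 2,
#       'sort_keys': True,
#       'ensure_ascii': False,
#   }
#
# With fixed kwargs like these, reserialize() is idempotent: running it twice
# over the same file produces byte-identical output.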
lib/python/treadmill/zkadmin.py
vrautela/treadmill
133
132938
"""Zookeeper admin interface. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import socket import logging _LOGGER = logging.getLogger(__name__) def netcat(hostname, port, command): """Send 4letter netcat to Zookeeper control port. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((hostname, port)) sock.sendall(command) sock.shutdown(socket.SHUT_WR) data = [] while True: chunk = sock.recv(1024) if not chunk: break data.append(chunk) sock.close() return b''.join(data).decode() # pylint does not like "ok" as a function name. # pylint: disable=C0103 def ok(hostname, port): """Send ruok command to Zookeeper instance. """ try: return netcat(hostname, port, b'ruok\n') == 'imok' except socket.error: return False def stat(hostname, port): """Send stat command to Zookeeper instance. """ return netcat(hostname, port, b'stat\n')
tests/unit/interpreters/test_py_spec.py
snsnlou/tox
2,811
132973
<filename>tests/unit/interpreters/test_py_spec.py<gh_stars>1000+ from tox.interpreters.py_spec import PythonSpec def test_py_3_10(): spec = PythonSpec.from_name("python3.10") assert (spec.major, spec.minor) == (3, 10) def test_debug_python(): spec = PythonSpec.from_name("python3.10-dbg") assert (spec.major, spec.minor) == (None, None) def test_parse_architecture(): spec = PythonSpec.from_name("python3.10-32") assert (spec.major, spec.minor, spec.architecture) == (3, 10, 32)
gtpython/sketch_constructed.py
satta/genometools
202
132989
#!/usr/bin/python # -*- coding: utf-8 -*- from gt.core import * from gt.extended import * from gt.annotationsketch import * from gt.annotationsketch.custom_track import CustomTrack from gt.core.gtrange import Range import sys if __name__ == "__main__": if len(sys.argv) != 3: sys.stderr.write("Usage: " + (sys.argv)[0] + " style_file PNG_file\n") sys.exit(1) seqid = "chromosome_21" nodes = [] # construct a gene on the forward strand with two exons gene = FeatureNode.create_new(seqid, "gene", 100, 900, "+") exon = FeatureNode.create_new(seqid, "exon", 100, 200, "+") gene.add_child(exon) intron = FeatureNode.create_new(seqid, "intron", 201, 799, "+") gene.add_child(intron) exon = FeatureNode.create_new(seqid, "exon", 800, 900, "+") gene.add_child(exon) # construct a single-exon gene on the reverse strand # (within the intron of the forward strand gene) reverse_gene = FeatureNode.create_new(seqid, "gene", 400, 600, "-") reverse_exon = FeatureNode.create_new(seqid, "exon", 400, 600, "-") reverse_gene.add_child(reverse_exon) pngfile = (sys.argv)[2] style = Style() style.load_file((sys.argv)[1]) diagram = Diagram.from_array([gene, reverse_gene], Range(1, 1000), style) layout = Layout(diagram, 600, style) height = layout.get_height() canvas = CanvasCairoFile(style, 600, height) layout.sketch(canvas) canvas.to_file(pngfile)
gcp_variant_transforms/transforms/limit_write_test.py
tsa87/gcp-variant-transforms
113
133010
<reponame>tsa87/gcp-variant-transforms # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for limit_write module.""" import unittest from apache_beam.testing.test_pipeline import TestPipeline from apache_beam.testing.util import assert_that from apache_beam.testing.util import equal_to from apache_beam.transforms import Create from gcp_variant_transforms.beam_io import vcfio from gcp_variant_transforms.transforms import limit_write class LimitWriteTest(unittest.TestCase): """Test cases for the ``LimitWrite`` PTransform.""" def _get_sample_variants(self): variant1 = vcfio.Variant( reference_name='chr19', start=11, end=12, reference_bases='C') variant2 = vcfio.Variant( reference_name='20', start=123, end=125, reference_bases='CT') variant3 = vcfio.Variant( reference_name='20', start=None, end=None, reference_bases=None) variant4 = vcfio.Variant( reference_name='20', start=123, end=125, reference_bases='CT') return [variant1, variant2, variant3, variant4] def test_limit_write_default_shard_limit(self): variants = self._get_sample_variants() input_pcoll = Create(variants) pipeline = TestPipeline() output_pcoll = ( pipeline | input_pcoll | 'LimitWrite' >> limit_write.LimitWrite(4500)) assert_that(output_pcoll, equal_to(variants)) pipeline.run() def test_limit_write_shard_limit_4(self): variants = self._get_sample_variants() input_pcoll = Create(variants) pipeline = TestPipeline() output_pcoll = ( pipeline | input_pcoll | 'LimitWrite' >> limit_write.LimitWrite(4)) assert_that(output_pcoll, equal_to(variants)) pipeline.run() def test_limit_write_shard_limit_1(self): variants = self._get_sample_variants() input_pcoll = Create(variants) pipeline = TestPipeline() output_pcoll = ( pipeline | input_pcoll | 'LimitWrite' >> limit_write.LimitWrite(1)) assert_that(output_pcoll, equal_to(variants)) pipeline.run()
TM1py/Services/ViewService.py
adscheevel/tm1py
113
133057
<reponame>adscheevel/tm1py # -*- coding: utf-8 -*- import collections from typing import List, Tuple, Union from requests import Response from TM1py.Exceptions.Exceptions import TM1pyRestException from TM1py.Objects import View from TM1py.Objects.MDXView import MDXView from TM1py.Objects.NativeView import NativeView from TM1py.Services.ObjectService import ObjectService from TM1py.Services.RestService import RestService from TM1py.Utils import format_url class ViewService(ObjectService): """ Service to handle Object Updates for cube views (NativeViews and MDXViews) """ def __init__(self, rest: RestService): super().__init__(rest) def create(self, view: Union[MDXView, NativeView], private: bool = False, **kwargs) -> Response: """ create a new view on TM1 Server :param view: instance of subclass of TM1py.View (TM1py.NativeView or TM1py.MDXView) :param private: boolean :return: Response """ view_type = "PrivateViews" if private else "Views" url = format_url("/api/v1/Cubes('{}')/{}", view.cube, view_type) return self._rest.POST(url, view.body, **kwargs) def exists(self, cube_name: str, view_name: str, private: bool = None, **kwargs): """ Checks if view exists as private, public or both :param cube_name: string, name of the cube :param view_name: string, name of the view :param private: boolean, if None: check for private and public :return boolean tuple """ url_template = "/api/v1/Cubes('{}')/{}('{}')" if private is not None: url = format_url(url_template, cube_name, "PrivateViews" if private else "Views", view_name) return self._exists(url, **kwargs) view_types = collections.OrderedDict() view_types['PrivateViews'] = False view_types['Views'] = False for view_type in view_types: try: url = format_url(url_template, cube_name, view_type, view_name) self._rest.GET(url, **kwargs) view_types[view_type] = True except TM1pyRestException as e: if e.status_code != 404: raise e return tuple(view_types.values()) def get(self, cube_name: str, view_name: str, private: bool = False, **kwargs) -> View: view_type = "PrivateViews" if private else "Views" url = format_url("/api/v1/Cubes('{}')/{}('{}')?$expand=*", cube_name, view_type, view_name) response = self._rest.GET(url, **kwargs) view_as_dict = response.json() if "MDX" in view_as_dict: return MDXView(cube_name=cube_name, view_name=view_name, MDX=view_as_dict["MDX"]) else: return self.get_native_view(cube_name=cube_name, view_name=view_name, private=private) def get_native_view(self, cube_name: str, view_name: str, private=False, **kwargs) -> NativeView: """ Get a NativeView from TM1 Server :param cube_name: string, name of the cube :param view_name: string, name of the native view :param private: boolean :return: instance of TM1py.NativeView """ view_type = "PrivateViews" if private else "Views" url = format_url( "/api/v1/Cubes('{}')/{}('{}')?$expand=" "tm1.NativeView/Rows/Subset($expand=Hierarchy($select=Name;" "$expand=Dimension($select=Name)),Elements($select=Name);" "$select=Expression,UniqueName,Name, Alias), " "tm1.NativeView/Columns/Subset($expand=Hierarchy($select=Name;" "$expand=Dimension($select=Name)),Elements($select=Name);" "$select=Expression,UniqueName,Name,Alias), " "tm1.NativeView/Titles/Subset($expand=Hierarchy($select=Name;" "$expand=Dimension($select=Name)),Elements($select=Name);" "$select=Expression,UniqueName,Name,Alias), " "tm1.NativeView/Titles/Selected($select=Name)", cube_name, view_type, view_name) response = self._rest.GET(url, **kwargs) native_view = NativeView.from_json(response.text, cube_name) return native_view def 
get_mdx_view(self, cube_name: str, view_name: str, private: bool = False, **kwargs) -> MDXView: """ Get an MDXView from TM1 Server :param cube_name: String, name of the cube :param view_name: String, name of the MDX view :param private: boolean :return: instance of TM1py.MDXView """ view_type = 'PrivateViews' if private else 'Views' url = format_url("/api/v1/Cubes('{}')/{}('{}')?$expand=*", cube_name, view_type, view_name) response = self._rest.GET(url, **kwargs) mdx_view = MDXView.from_json(view_as_json=response.text) return mdx_view def get_all(self, cube_name: str, **kwargs) -> Tuple[List[View], List[View]]: """ Get all public and private views from cube. :param cube_name: String, name of the cube. :return: 2 Lists of TM1py.View instances: private views, public views """ private_views, public_views = [], [] for view_type in ('PrivateViews', 'Views'): url = format_url( "/api/v1/Cubes('{}')/{}?$expand=" "tm1.NativeView/Rows/Subset($expand=Hierarchy($select=Name;" "$expand=Dimension($select=Name)),Elements($select=Name);" "$select=Expression,UniqueName,Name, Alias), " "tm1.NativeView/Columns/Subset($expand=Hierarchy($select=Name;" "$expand=Dimension($select=Name)),Elements($select=Name);" "$select=Expression,UniqueName,Name,Alias), " "tm1.NativeView/Titles/Subset($expand=Hierarchy($select=Name;" "$expand=Dimension($select=Name)),Elements($select=Name);" "$select=Expression,UniqueName,Name,Alias), " "tm1.NativeView/Titles/Selected($select=Name)", cube_name, view_type) response = self._rest.GET(url, **kwargs) response_as_list = response.json()['value'] for view_as_dict in response_as_list: if view_as_dict['@odata.type'] == '#ibm.tm1.api.v1.MDXView': view = MDXView.from_dict(view_as_dict, cube_name) else: view = NativeView.from_dict(view_as_dict, cube_name) if view_type == "PrivateViews": private_views.append(view) else: public_views.append(view) return private_views, public_views def get_all_names(self, cube_name: str, **kwargs) -> Tuple[List[str], List[str]]: """ :param cube_name: :return: """ private_views, public_views = [], [] for view_type in ('PrivateViews', 'Views'): url = format_url("/api/v1/Cubes('{}')/{}?$select=Name", cube_name, view_type) response = self._rest.GET(url, **kwargs) response_as_list = response.json()['value'] for view in response_as_list: if view_type == "PrivateViews": private_views.append(view['Name']) else: public_views.append(view['Name']) return private_views, public_views def update(self, view: Union[MDXView, NativeView], private: bool = False, **kwargs) -> Response: """ Update an existing view :param view: instance of TM1py.NativeView or TM1py.MDXView :param private: boolean :return: response """ view_type = 'PrivateViews' if private else 'Views' url = format_url("/api/v1/Cubes('{}')/{}('{}')", view.cube, view_type, view.name) response = self._rest.PATCH(url, view.body, **kwargs) return response def update_or_create(self, view: Union[MDXView, NativeView], private: bool = False, **kwargs) -> Response: """ update if exists, else create :param view: :param private: :param kwargs: :return: """ if self.exists(view.cube, view.name, private=private, **kwargs): return self.update(view, private=private, **kwargs) return self.create(view, private=private, **kwargs) def delete(self, cube_name: str, view_name: str, private: bool = False, **kwargs) -> Response: """ Delete an existing view (MDXView or NativeView) on the TM1 Server :param cube_name: String, name of the cube :param view_name: String, name of the view :param private: Boolean :return: String, the response """ 
view_type = 'PrivateViews' if private else 'Views' url = format_url("/api/v1/Cubes('{}')/{}('{}')", cube_name, view_type, view_name) response = self._rest.DELETE(url, **kwargs) return response
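
# Usage sketch (not part of the original file): connection parameters below are
# placeholders. In TM1py this service is typically reached through a TM1Service
# instance as tm1.cubes.views, rather than constructed directly.
#
#   from TM1py import TM1Service
#   with TM1Service(address='localhost', port=8001, user='admin',
#                   password='secret', ssl=True) as tm1:   # placeholder credentials
#       private_names, public_names = tm1.cubes.views.get_all_names('plan_BudgetPlan')
#       exists_private, exists_public = tm1.cubes.views.exists('plan_BudgetPlan', 'Default')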
datasets/blog_authorship_corpus/blog_authorship_corpus.py
WojciechKusa/datasets
10,608
133062
<gh_stars>1000+ import glob import os import datasets logger = datasets.logging.get_logger(__name__) _CITATION = """\ @inproceedings{schler2006effects, title={Effects of age and gender on blogging.}, author={Schler, Jonathan and Koppel, Moshe and Argamon, Shlomo and Pennebaker, <NAME>}, booktitle={AAAI spring symposium: Computational approaches to analyzing weblogs}, volume={6}, pages={199--205}, year={2006} } """ _DESCRIPTION = """\ The Blog Authorship Corpus consists of the collected posts of 19,320 bloggers gathered from blogger.com in August 2004. The corpus incorporates a total of 681,288 posts and over 140 million words - or approximately 35 posts and 7250 words per person. Each blog is presented as a separate file, the name of which indicates a blogger id# and the blogger’s self-provided gender, age, industry and astrological sign. (All are labeled for gender and age but for many, industry and/or sign is marked as unknown.) All bloggers included in the corpus fall into one of three age groups: - 8240 "10s" blogs (ages 13-17), - 8086 "20s" blogs (ages 23-27), - 2994 "30s" blogs (ages 33-47). For each age group there are an equal number of male and female bloggers. Each blog in the corpus includes at least 200 occurrences of common English words. All formatting has been stripped with two exceptions. Individual posts within a single blogger are separated by the date of the following post and links within a post are denoted by the label urllink. The corpus may be freely used for non-commercial research purposes. """ _URL = "https://lingcog.blogspot.com/p/datasets.html" _DATA_URL = "https://drive.google.com/u/0/uc?id=1cGy4RNDV87ZHEXbiozABr9gsSrZpPaPz&export=download" class BlogAuthorshipCorpus(datasets.GeneratorBasedBuilder): """TODO(BlogAuthorship): Short description of my dataset.""" VERSION = datasets.Version("1.0.0") BUILDER_CONFIGS = [ datasets.BuilderConfig( name="blog_authorship_corpus", version=VERSION, description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.", ), ] def _info(self): return datasets.DatasetInfo( # This is the description that will appear on the datasets page. description=_DESCRIPTION, # datasets.features.FeatureConnectors features=datasets.Features( { "text": datasets.Value("string"), "date": datasets.Value("string"), "gender": datasets.Value("string"), "age": datasets.Value("int32"), "horoscope": datasets.Value("string"), "job": datasets.Value("string"), } ), # If there's a common (input, target) tuple from the features, # specify them here. They'll be used if as_supervised=True in # builder.as_dataset. 
supervised_keys=None, # Homepage of the dataset for documentation homepage=_URL, citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" data = dl_manager.download_and_extract(_DATA_URL) data_dir = os.path.join(data, "blogs") files = sorted(glob.glob(os.path.join(data_dir, "*.xml"))) train_files = [] validation_files = [] for i, file_path in enumerate(files): # 95% / 5% (train / val) split if i % 20 == 0: validation_files.append(file_path) else: train_files.append(file_path) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"files": train_files, "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"files": validation_files, "split": "validation"}, ), ] def _generate_examples(self, files, split): def parse_date(line): # parse line to date return line.strip().split("<date>")[-1].split("</date>")[0] key = 0 for file_path in files: file_name = os.path.basename(file_path) logger.info("generating examples from = %s", file_path) file_id, gender, age, job, horoscope = tuple(file_name.split(".")[:-1]) # TODO: yield also file_id? # Note: import xml.etree.ElementTree as etree does not work. File cannot be parsed # use open instead with open(file_path, encoding="latin_1") as f: date = "" for line in f: line = line.strip() if "<date>" in line: date = parse_date(line) elif line != "" and not line.startswith("<"): if date == "": logger.warning(f"Date missing for {line} in {file_name}") assert date is not None, f"Date is missing before {line}" yield key, { "text": line, "date": date, "gender": gender, "age": int(age), "job": job, "horoscope": horoscope, } key += 1 else: continue
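
# Usage sketch (not part of the original file): once this builder script is
# available to the datasets library, the corpus is loaded through the standard
# API; the split sizes follow the 95/5 train/validation partition above.
#
#   from datasets import load_dataset
#   ds = load_dataset('blog_authorship_corpus')
#   sample = ds['train'][0]
#   # {'text': ..., 'date': ..., 'gender': ..., 'age': ..., 'horoscope': ..., 'job': ...}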
docs/python/_includes/example_client.py
yeosujin/azure-sdk-korean
302
133102
"""Example client using some of the most common API patterns """ import models import azure.core.pipeline.transport as transports class Thing(object): """A simple model type representing a Thing. :ivar name: The name of the thing. :vartype name: str :ivar size: The size of the thing. :vartype size: int """ def __init__(self, name, size): # type: (str, number) -> None """Create a new Thing :param name: The name of the thing :type name: str :param size: The size of the thing :type size: int """ # Please note that we are using attributes rather than properties. self.name = name self.size = size @classmethod def from_response(self, response): # type: (azure.core.pipeline.HttpResponse) -> Thing """Factory method to, given a response, construct a ~Thing """ return Thing(**response.context['deserialized_data']) def __repr__(self): # For simple model types, we can just dump our __dict__ and # truncate the output at 1024 characters. return json.dumps(self.__dict__)[:1024] class ExampleClient(object): def __init__(self, endpoint, credential, **kwargs): # type: (str, azure.core.credential.TokenCredential, **Any) -> None """Create a new example client instance :param endpoint: Endpoint to connect to. :type endpoint str: :param credential: Credentials to use when connecting to the service. :type credential: ~azure.core.credentials.TokenCredential :keyword apiversion: API version to use when talking to the service. Default is '2020-12-31' :type apiversion: str :keyword transport: HttpTransport to use. Default is ~transports.RequestsHttpTransport. :type transport: ~transports.HttpTransport """ self._api_version = kwargs.pop('api_version', '2020-12-31') transport = kwargs.pop('transport', None) or transports.RequestsTransport(**kwargs) # continue to build up your client... self._pipeline = [ ..., # List of policies for this specific client transport ] @classmethod def from_connection_string(cls, connection_string, **kwargs): # type: (str, **Any) -> None """Optional factory method if the service supports connection strings :param connection_string: Connection string containing endpoint and credentials :type connection_string: str :returns: The newly created client. :rtype: ~ExampleClient """ endpoint, credential = _parse(connection_string) return cls(endpoint, credential, **kwargs) def get_thing(self, name, **kwargs): # type: (str, **Any) -> Thing """Get the Thing with name `name`. :param name: The name of the ~Thing to get :type name: str :rtype: ~Thing """ model_factory = kwargs.pop('cls', Thing.from_response) request = self._build_get_thing_request(name) # Pass along all policy parameters when making the request response = self._pipeline.send(request, **kwargs) return model_factory(response) def list_things(self, **kwargs): # type: (**Any) -> azure.core.ItemPaged[Thing] """List all things. :rtype: ~azure.core.ItemPaged[~Thing] """ ... return azure.core.paging.ItemPaged(...) def begin_restart_thing(self, name, **kwargs) -> azure.core.polling.LROPoller[bool]: # type: (str, **Any) -> azure.core.polling.LROPoller[bool] """Restart the thing :param name: The name of the thing to restart :type name: str """ model = kwargs.pop('cls', dict) request = self._build_begin_restart_thing(name) # Pass along all policy parameters when making the request response = self._pipeline.send(request, **kwargs) # TODO: show how to construct the poller instance return azure.core.polling.LROPoller(...)
vedadet/engines/infer_engine.py
jie311/vedadet
424
133105
import torch from vedacore.misc import registry from vedadet.bridge import build_converter, build_meshgrid from vedadet.misc.bbox import bbox2result, multiclass_nms from .base_engine import BaseEngine @registry.register_module('engine') class InferEngine(BaseEngine): def __init__(self, model, meshgrid, converter, num_classes, use_sigmoid, test_cfg): super().__init__(model) self.meshgrid = build_meshgrid(meshgrid) self.converter = build_converter(converter) if use_sigmoid: self.cls_out_channels = num_classes else: self.cls_out_channels = num_classes + 1 self.test_cfg = test_cfg def extract_feats(self, img): feats = self.model(img, train=False) return feats def _get_raw_dets(self, img, img_metas): """ Args: img(torch.Tensor): shape N*3*H*W, N is batch size img_metas(list): len(img_metas) = N Returns: dets(list): len(dets) is the batch size, len(dets[ii]) = #classes, dets[ii][jj] is an np.array whose shape is N*5 """ feats = self.extract_feats(img) featmap_sizes = [feat.shape[-2:] for feat in feats[0]] dtype = feats[0][0].dtype device = feats[0][0].device anchor_mesh = self.meshgrid.gen_anchor_mesh(featmap_sizes, img_metas, dtype, device) # bboxes, scores, score_factor dets = self.converter.get_bboxes(anchor_mesh, img_metas, *feats) return dets def _simple_infer(self, img, img_metas): """ Args: img(torch.Tensor): shape N*3*H*W, N is batch size img_metas(list): len(img_metas) = N Returns: dets(list): len(dets) is the batch size, len(dets[ii]) = #classes, dets[ii][jj] is an np.array whose shape is N*5 """ dets = self._get_raw_dets(img, img_metas) batch_size = len(dets) result_list = [] for ii in range(batch_size): bboxes, scores, centerness = dets[ii] det_bboxes, det_labels = multiclass_nms( bboxes, scores, self.test_cfg.score_thr, self.test_cfg.nms, self.test_cfg.max_per_img, score_factors=centerness) bbox_result = bbox2result(det_bboxes, det_labels, self.cls_out_channels) result_list.append(bbox_result) return result_list def _aug_infer(self, img_list, img_metas_list): assert len(img_list) == len(img_metas_list) dets = [] ntransforms = len(img_list) for idx in range(len(img_list)): img = img_list[idx] img_metas = img_metas_list[idx] tdets = self._get_raw_dets(img, img_metas) dets.append(tdets) batch_size = len(dets[0]) nclasses = len(dets[0][0]) merged_dets = [] for ii in range(batch_size): single_image = [] for kk in range(nclasses): single_class = [] for jj in range(ntransforms): single_class.append(dets[jj][ii][kk]) single_image.append(torch.cat(single_class, axis=0)) merged_dets.append(single_image) result_list = [] for ii in range(batch_size): bboxes, scores, centerness = merged_dets[ii] det_bboxes, det_labels = multiclass_nms( bboxes, scores, self.test_cfg.score_thr, self.test_cfg.nms, self.test_cfg.max_per_img, score_factors=centerness) bbox_result = bbox2result(det_bboxes, det_labels, self.cls_out_channels) result_list.append(bbox_result) return result_list def infer(self, img, img_metas): if len(img) == 1: return self._simple_infer(img[0], img_metas[0]) else: return self._aug_infer(img, img_metas)
tern/formats/spdx/spdx_common.py
KerinPithawala/tern
325
133115
# -*- coding: utf-8 -*- # # Copyright (c) 2021 VMware, Inc. All Rights Reserved. # SPDX-License-Identifier: BSD-2-Clause """ Common functions that are useful for both JSON and Tag-value document creation """ import datetime import hashlib import logging import re import uuid from tern.utils import constants from tern.formats.spdx.spdxtagvalue import formats as spdx_formats # global logger logger = logging.getLogger(constants.logger_name) ################### # General Helpers # ################### def get_uuid(): """ Return a UUID string""" return str(uuid.uuid4()) def get_timestamp(): """Return a timestamp""" return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") def get_string_id(string): """ Return a unique identifier for the given string""" return hashlib.sha256(string.encode('utf-8')).hexdigest()[-7:] def get_license_ref(license_string): """ For SPDX tag-value format, return a LicenseRef string """ return 'LicenseRef-' + get_string_id(license_string) ######################## # Common Image Helpers # ######################## def get_image_spdxref(image_obj): '''Given the image object, return an SPDX reference ID''' # here we return the image name, tag and id return 'SPDXRef-{}'.format(image_obj.get_human_readable_id()) ######################## # Common Layer Helpers # ######################## def get_file_licenses(filedata): '''Return a unique list of file licenses''' return list(set(filedata.licenses)) def get_layer_licenses(layer_obj): '''Return a list of unique licenses from the files analyzed in the layer object. It is assumed that the files were analyzed and there should be some license expressions. If there are not, an empty list is returned''' licenses = set() for filedata in layer_obj.files: # we will use the SPDX license expressions here as they will be # valid SPDX license identifiers if filedata.licenses: for lic in get_file_licenses(filedata): licenses.add(lic) return list(licenses) def get_layer_spdxref(layer_obj): '''Given the layer object, return an SPDX reference ID''' # here we return the shortened diff_id of the layer return 'SPDXRef-{}'.format(layer_obj.diff_id[:10]) def get_layer_spdxref_snapshot(timestamp): """Given the layer object created at container build time, return an SPDX reference ID. For this case, a layer's diff_id and filesystem hash is not known so we will provide a generic ID""" return 'SPDXRef-snapshot-{}'.format(timestamp) def get_layer_verification_code(layer_obj): '''Calculate the verification code from the files in an image layer. This assumes that layer_obj.files_analyzed is True. The implementation follows the algorithm in the SPDX spec v 2.1 which requires SHA1 to be used to calculate the checksums of the file and the final verification code''' sha1_list = [] for filedata in layer_obj.files: filesha = filedata.get_checksum('sha1') if not filesha: # we cannot create a verification code, hence file generation # is aborted logger.critical( 'File %s does not have a sha1 checksum. Failed to generate ' 'a SPDX tag-value report', filedata.path) return None sha1_list.append(filesha) sha1_list.sort() sha1s = ''.join(sha1_list) return hashlib.sha1(sha1s.encode('utf-8')).hexdigest() # nosec def get_layer_checksum(layer_obj): '''Return a SPDX formatted checksum value. 
It should be of the form: checksum_type: <checksum>''' return '{}: {}'.format(layer_obj.checksum_type.upper(), layer_obj.checksum) ########################## # Common Package Helpers # ########################## def get_package_spdxref(package_obj): '''Given the package object, return an SPDX reference ID''' pkg_ref = spdx_formats.package_id.format(name=package_obj.name, ver=package_obj.version) # replace all the strings that SPDX doesn't like clean_pkg_ref = re.sub(r'[:+~]', r'-', pkg_ref) return 'SPDXRef-{}'.format(clean_pkg_ref) ####################### # Common File Helpers # ####################### def get_file_spdxref(filedata, layer_id): '''Given a FileData object, return a unique identifier for the SPDX document. According to the spec, this should be of the form: SPDXRef-<id> We will use a combination of the file name, checksum and layer_id and calculate a hash of this string''' file_string = filedata.path + filedata.checksum[:7] + layer_id fileid = get_string_id(file_string) return 'SPDXRef-{}'.format(fileid) def get_file_checksum(filedata): '''Given a FileData object, return the checksum required by SPDX. This should be of the form: <checksum_type>: <checksum> Currently, the spec requires a SHA1 checksum''' return '{}: {}'.format('SHA1', filedata.get_checksum('sha1')) def get_file_notice(filedata): '''Return a formatted string with all copyrights found in a file. Return an empty string if there are no copyrights''' notice = '' for cp in filedata.copyrights: notice = notice + cp + '\n' return notice def get_file_comment(filedata): '''Return a formatted comment string with all file level notices. Return an empty string if no notices are present''' comment = '' for origin in filedata.origins.origins: comment = comment + '{}:'.format(origin.origin_str) + '\n' for notice in origin.notices: comment = comment + \ '{}: {}'.format(notice.level, notice.message) + '\n' return comment
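
# Illustrative sketch (not part of the original file): the package verification
# code algorithm used by get_layer_verification_code, restated standalone. Per
# the SPDX 2.1 spec, the per-file SHA1s are sorted, concatenated and hashed
# with SHA1 again, so the result is deterministic and independent of the order
# in which the files were listed.
def _demo_verification_code(file_sha1s):
    return hashlib.sha1(''.join(sorted(file_sha1s)).encode('utf-8')).hexdigest()  # nosec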
tests/common/test_run/ascend/matmul_mul_run.py
tianjiashuo/akg
286
133135
<filename>tests/common/test_run/ascend/matmul_mul_run.py<gh_stars>100-1000 # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from akg.utils import kernel_exec as utils from akg.ops.math.ascend import MatMul from tests.common.test_run.ascend.matmul_run import * from akg.ops.math import Mul def matmul_mul(x, y, c, b, out_dtype, left_format="zZ", right_format="nZ", out_format="zN", transpose_x=False, transpose_y=False, attrs={}, target="cce"): matmul_res, attrs = MatMul(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y, attrs=None) attr = {} print(matmul_res.shape) res = Mul(matmul_res, c, target='cce') return res, attrs def matmul_mul_execute(shape_x, shape_y, bias, cmul, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs={}): batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y) m = (m + 15) // 16 * 16 n = (n + 15) // 16 * 16 k = (k + 15) // 16 * 16 _, _, _, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format) if cmul == "scalar": cmul_shape = (1, ) else: cmul_shape = out_shape mod = matmul_mul_compile(shape_x, shape_y, bias, cmul_shape, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs={}) # Generate data m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, bias_dtype, out_dtype, bias, adj_x, adj_y, left_format, right_format, out_format) cadd_data = random_gaussian(cmul_shape, miu=0.5, sigma=0.01).astype(out_dtype) bench_mark = bench_mark * cadd_data # mod launch output = np.full(out_shape, np.nan, out_dtype) if bias == 0: output = utils.mod_launch(mod, (m_x, m_y, cadd_data, output), expect=bench_mark) elif bias == 1: output = utils.mod_launch(mod, (m_x, m_y, cadd_data, bias_data, output), expect=bench_mark) # compare result rtol, atol = get_rtol_atol("matmul", dtype) compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True) return (m_x, m_y), output, bench_mark, compare_result def matmul_mul_compile(shape_x, shape_y, bias, cadd, left_format, right_format, output_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs, tuning=False): batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y) m = (m + 15) // 16 * 16 n = (n + 15) // 16 * 16 k = (k + 15) // 16 * 16 shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, output_format) input_shapes = [shape_xx, shape_yy, cadd, bias_shape] input_types = [dtype, dtype, out_dtype, bias_dtype] has_bias = False if bias == 1: has_bias = True op_attrs = [out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs] if has_bias == False: input_shapes = [shape_xx, shape_yy, cadd] input_types = [dtype, dtype, out_dtype] op_attrs = [None, out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs] return 
utils.op_build_test(matmul_mul, input_shapes, input_types, op_attrs, kernel_name, attrs=attrs, tuning=tuning)
python/taichi/examples/simulation/fractal.py
kxxt/taichi
11,699
133141
<filename>python/taichi/examples/simulation/fractal.py<gh_stars>1000+ import taichi as ti ti.init(arch=ti.gpu) n = 320 pixels = ti.field(dtype=float, shape=(n * 2, n)) @ti.func def complex_sqr(z): return ti.Vector([z[0]**2 - z[1]**2, z[1] * z[0] * 2]) @ti.kernel def paint(t: float): for i, j in pixels: # Parallelized over all pixels c = ti.Vector([-0.8, ti.cos(t) * 0.2]) z = ti.Vector([i / n - 1, j / n - 0.5]) * 2 iterations = 0 while z.norm() < 20 and iterations < 50: z = complex_sqr(z) + c iterations += 1 pixels[i, j] = 1 - iterations * 0.02 gui = ti.GUI("Julia Set", res=(n * 2, n)) for i in range(1000000): paint(i * 0.03) gui.set_image(pixels) gui.show()
interpolation/splines/eval_cubic.py
vishalbelsare/interpolation.py
110
133145
import numpy from .eval_splines import eval_cubic ## the functions in this file provide backward compatibility calls ## ## they can optionnally allocate memory for the result ## they work for any dimension, except the functions which compute the gradient ####################### # Compatibility calls # ####################### from numba import generated_jit from .codegen import source_to_function @generated_jit def get_grid(a, b, n, C): d = C.ndim s = "({},)".format(str.join(", ", [f"(a[{k}],b[{k}],n[{k}])" for k in range(d)])) txt = "def get_grid(a,b,n,C): return {}".format(s) f = source_to_function(txt) return f def eval_cubic_spline(a, b, orders, coefs, point): """Evaluates a cubic spline at one point Parameters: ----------- a : array of size d (float) Lower bounds of the cartesian grid. b : array of size d (float) Upper bounds of the cartesian grid. orders : array of size d (int) Number of nodes along each dimension (=(n1,...,nd) ) coefs : array of dimension d, and size (n1+2, ..., nd+2) Filtered coefficients. point : array of size d Coordinate of the point where the splines must be interpolated. Returns ------- value : float Interpolated value. """ grid = get_grid(a, b, orders, coefs) return eval_cubic(grid, coefs, point) def vec_eval_cubic_spline(a, b, orders, coefs, points, values=None): """Evaluates a cubic spline at many points Parameters: ----------- a : array of size d (float) Lower bounds of the cartesian grid. b : array of size d (float) Upper bounds of the cartesian grid. orders : array of size d (int) Number of nodes along each dimension. (=(n1,...,nd)) coefs : array of dimension d, and size (n1+2, ..., nd+2) Filtered coefficients. points : array of size N x d List of points where the splines must be interpolated. values (optional) : array of size (N) If not None, contains the result. Returns ------- values : array of size (N) Interpolated values. values[i] contains spline evaluated at point points[i,:]. """ grid = get_grid(a, b, orders, coefs) if values is None: return eval_cubic(grid, coefs, points) else: eval_cubic(grid, coefs, points, values) def eval_cubic_splines(a, b, orders, mcoefs, point, values=None): """Evaluates multi-splines at one point. Parameters: ----------- a : array of size d (float) Lower bounds of the cartesian grid. b : array of size d (float) Upper bounds of the cartesian grid. orders : array of size d (int) Number of nodes along each dimension. mcoefs : array of dimension d+1, and size (p, n1+2, ..., nd+2) Filtered coefficients. For i in 1:(mcoefs.shape[0]), mcoefs[i,...] contains the coefficients of spline number i. point : array of size d Point where the spline must be interpolated. values (optional) : array of size (p) If not None, contains the result. Returns ------- values : array of size (p) Interpolated values. values[j] contains spline n-j evaluated at point `point`. """ grid = get_grid(a, b, orders, mcoefs[..., 0]) if values is None: return eval_cubic(grid, mcoefs, point) else: eval_cubic(grid, mcoefs, point, values) def vec_eval_cubic_splines(a, b, orders, mcoefs, points, values=None): """Evaluates multi-splines on a series of points. Parameters: ----------- a : array of size d (float) Lower bounds of the cartesian grid. b : array of size d (float) Upper bounds of the cartesian grid. orders : array of size d (int) Number of nodes along each dimension. ( =(n1,...nd) ) mcoefs : array of dimension d+1, and size (n1+2, ..., nd+2, p) Filtered coefficients. coefs[i,...] contains the coefficients of spline number i. 
points : array of size N x d List of points where the splines must be interpolated. values (optional) : array of size (N x p) If not None, contains the result. Returns ------- values : array of size (N x p) Interpolated values. values[i,j] contains spline n-j evaluated at point points[i,:]. """ grid = get_grid(a, b, orders, mcoefs[..., 0]) if values is None: return eval_cubic(grid, mcoefs, points) else: eval_cubic(grid, mcoefs, points, values) ######### from .eval_cubic_numba import ( vec_eval_cubic_splines_G_1, vec_eval_cubic_splines_G_2, vec_eval_cubic_splines_G_3, vec_eval_cubic_splines_G_4, ) def vec_eval_cubic_splines_G(a, b, orders, mcoefs, points, values=None, dvalues=None): a = numpy.array(a, dtype=float) b = numpy.array(b, dtype=float) orders = numpy.array(orders, dtype=int) d = a.shape[0] N = points.shape[0] n_sp = mcoefs.shape[-1] if values is None: values = numpy.empty((N, n_sp)) if dvalues is None: dvalues = numpy.empty((N, d, n_sp)) if d == 1: vec_eval_cubic_splines_G_1(a, b, orders, mcoefs, points, values, dvalues) elif d == 2: vec_eval_cubic_splines_G_2(a, b, orders, mcoefs, points, values, dvalues) elif d == 3: vec_eval_cubic_splines_G_3(a, b, orders, mcoefs, points, values, dvalues) elif d == 4: vec_eval_cubic_splines_G_4(a, b, orders, mcoefs, points, values, dvalues) return [values, dvalues]
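
# Usage sketch (not part of the original file): evaluating a 1-D cubic spline
# through the compatibility call above. The coefficient array is random here
# purely to exercise the signature; in real use, coefs comes from filtering
# data values and must have shape (n + 2,) per dimension, as documented above.
#
#   import numpy as np
#   a, b, orders = np.array([0.0]), np.array([1.0]), np.array([10])
#   coefs = np.random.rand(12)                  # (10 + 2,) filtered coefficients
#   points = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
#   values = vec_eval_cubic_spline(a, b, orders, coefs, points)   # shape (5,)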
tools/console/plugins/plugin_generate/gen_libs.py
wzhengsen/engine-x
113
133146
#!/usr/bin/python #-*- coding: UTF-8 -*- import os import sys import shutil import json import utils from . import gen_prebuilt_mk import adxe from MultiLanguage import MultiLanguage from adxe import CCPluginError from adxe import Logging from argparse import ArgumentParser class LibsCompiler(adxe.CCPlugin): CFG_FILE = 'configs/gen_libs_config.json' KEY_LIBS_OUTPUT = 'libs_output_dir' KEY_XCODE_PROJS_INFO = 'xcode_projs_info' KEY_VS_PROJS_INFO = 'vs_projs_info' KEY_SUPPORT_VS_VERSIONS = 'support_vs_versions' KEY_ANDROID_MKS = "android_mks" CHECK_KEYS = [ KEY_LIBS_OUTPUT, KEY_XCODE_PROJS_INFO, KEY_VS_PROJS_INFO, KEY_SUPPORT_VS_VERSIONS, KEY_ANDROID_MKS ] KEY_XCODE_TARGETS = 'targets' KEY_VS_BUILD_TARGETS = 'build_targets' @staticmethod def plugin_name(): return "gen-libs" @staticmethod def brief_description(): return MultiLanguage.get_string('GEN_LIBS_BRIEF') def parse_args(self, argv): """Custom and check param list. """ parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(), description=self.__class__.brief_description()) parser.add_argument('-c', dest='clean', action="store_true", help=MultiLanguage.get_string('GEN_LIBS_ARG_CLEAN')) parser.add_argument('-e', dest='engine_path', help=MultiLanguage.get_string('GEN_LIBS_ARG_ENGINE')) parser.add_argument('-p', dest='platform', action="append", choices=['ios', 'mac', 'android', 'win32'], help=MultiLanguage.get_string('GEN_LIBS_ARG_PLATFORM')) parser.add_argument('-m', "--mode", dest='compile_mode', default='debug', choices=['debug', 'release'], help=MultiLanguage.get_string('GEN_LIBS_ARG_MODE')) parser.add_argument('--dis-strip', dest='disable_strip', action="store_true", help=MultiLanguage.get_string('GEN_LIBS_ARG_DISABLE_STRIP')) group = parser.add_argument_group(MultiLanguage.get_string('GEN_LIBS_GROUP_WIN')) group.add_argument('--vs', dest='vs_version', type=int, default=None, help=MultiLanguage.get_string('GEN_LIBS_ARG_VS')) group = parser.add_argument_group(MultiLanguage.get_string('GEN_LIBS_GROUP_ANDROID')) group.add_argument("--app-abi", dest="app_abi", help=MultiLanguage.get_string('GEN_LIBS_ARG_ABI')) group.add_argument("--ap", dest="android_platform", help=MultiLanguage.get_string('COMPILE_ARG_AP')) group.add_argument('-l', dest='language', choices=['cpp', 'lua', 'js'], default='cpp', help='set project type to build') (args, unknown) = parser.parse_known_args(argv) self.init(args) return args def run(self, argv, dependencies): self.parse_args(argv) self.compile() def init(self, args): if getattr(sys, 'frozen', None): self.cur_dir = os.path.realpath(os.path.dirname(sys.executable)) self.default_engine_path = os.path.join(self.cur_dir, os.pardir, os.pardir, os.pardir) else: self.cur_dir = os.path.realpath(os.path.dirname(__file__)) self.default_engine_path = os.path.join(self.cur_dir, os.pardir, os.pardir, os.pardir, os.pardir) self.default_engine_path = os.path.normpath(self.default_engine_path) if args.engine_path is None: self.repo_x = self.default_engine_path else: engine_path = os.path.expanduser(args.engine_path) if os.path.isabs(engine_path): self.repo_x = os.path.normpath(engine_path) else: self.repo_x = os.path.normpath(os.path.abspath(engine_path)) if not os.path.isdir(self.repo_x): raise CCPluginError(MultiLanguage.get_string('GEN_LIBS_ERROR_WRONG_PATH_FMT', self.repo_x), CCPluginError.ERROR_WRONG_ARGS) self.cfg_file_path = os.path.join(self.cur_dir, LibsCompiler.CFG_FILE) self.parse_config() # arguments check and set self.clean = args.clean self.mode = args.compile_mode self._verbose = True self.language = 
args.language if args.platform is None: self.build_ios = True self.build_mac = True self.build_win = True self.build_android = True else: self.build_ios = False self.build_mac = False self.build_win = False self.build_android = False if 'win32' in args.platform: self.build_win = True if 'ios' in args.platform: self.build_ios = True if 'mac' in args.platform: self.build_mac = True if 'android' in args.platform: self.build_android = True self.disable_strip = args.disable_strip self.vs_version = args.vs_version self.use_incredibuild = False if args.app_abi is None: self.app_abi = 'armeabi-v7a' else: self.app_abi = args.app_abi self.app_abi_list = self.app_abi.split(":") self.android_platform = args.android_platform self.lib_dir = os.path.normpath(os.path.join(self.repo_x, self.cfg_info[LibsCompiler.KEY_LIBS_OUTPUT])) def parse_config(self): if not os.path.isfile(self.cfg_file_path): raise CCPluginError(MultiLanguage.get_string('GEN_LIBS_ERROR_WRONG_FILE_FMT', self.cfg_file_path), CCPluginError.ERROR_PATH_NOT_FOUND) try: f = open(self.cfg_file_path) self.cfg_info = json.load(f) f.close() except: raise CCPluginError(MultiLanguage.get_string('GEN_LIBS_ERROR_PARSE_FILE_FMT', self.cfg_file_path), CCPluginError.ERROR_PARSE_FILE) for k in LibsCompiler.CHECK_KEYS: if k not in self.cfg_info.keys(): raise CCPluginError(MultiLanguage.get_string('GEN_LIBS_ERROR_KEY_NOT_FOUND_FMT', (k, self.cfg_file_path)), CCPluginError.ERROR_WRONG_CONFIG) def get_cfg_info(self): return self.cfg_info def compile(self): if self.clean: self.clean_libs() if adxe.os_is_mac(): if self.build_mac or self.build_ios: self.compile_mac_ios() if adxe.os_is_win32(): if self.build_win: self.compile_win() if self.build_android: self.compile_android() # generate prebuilt mk files # self.modify_binary_mk() def build_win32_proj(self, cmd_path, sln_path, proj_name, mode): build_cmd = " ".join([ "\"%s\"" % cmd_path, "\"%s\"" % sln_path, "/t:%s" % proj_name, "/property:Configuration=%s" % mode, "/m" ]) self._run_cmd(build_cmd) def compile_win(self): if self.mode == 'debug': mode_str = 'Debug' else: mode_str = 'Release' # get the VS versions will be used for compiling support_vs_versions = self.cfg_info[LibsCompiler.KEY_SUPPORT_VS_VERSIONS] compile_vs_versions = support_vs_versions if self.vs_version is not None: if self.vs_version not in support_vs_versions: raise CCPluginError(MultiLanguage.get_string('GEN_LIBS_ERROR_NOT_SUPPORT_VS_FMT', self.vs_version), CCPluginError.ERROR_WRONG_ARGS) else: compile_vs_versions = [ self.vs_version ] vs_cmd_info = {} for vs_version in compile_vs_versions: # get the vs command with specified version vs_command = utils.get_msbuild_path(vs_version) if vs_command is None: Logging.warning(MultiLanguage.get_string('GEN_LIBS_WARNING_VS_NOT_FOUND_FMT', vs_version)) else: vs_cmd_info[vs_version] = vs_command if len(vs_cmd_info) == 0: raise CCPluginError(MultiLanguage.get_string('GEN_LIBS_ERROR_VS_NOT_FOUND'), CCPluginError.ERROR_TOOLS_NOT_FOUND) cocos2d_proj_file = os.path.join(self.repo_x, 'cocos/2d/libcocos2d.vcxproj') # get the VS projects info win32_proj_info = self.cfg_info[LibsCompiler.KEY_VS_PROJS_INFO] proj_path = win32_proj_info['proj_path'] for vs_version in compile_vs_versions: if not vs_version in vs_cmd_info.keys(): continue try: vs_command = vs_cmd_info[vs_version] # clean solutions full_proj_path = os.path.join(self.repo_x, proj_path) clean_cmd = " ".join([ "\"%s\"" % vs_command, "\"%s\"" % full_proj_path, "/t:Clean /p:Configuration=%s" % mode_str ]) self._run_cmd(clean_cmd) output_dir = 
os.path.join(self.lib_dir, "win32") # get the build folder & win32 output folder build_folder_path = os.path.join(os.path.dirname(proj_path), "%s.win32" % mode_str) win32_output_dir = os.path.join(self.repo_x, output_dir) if not os.path.exists(win32_output_dir): os.makedirs(win32_output_dir) # build project if self.use_incredibuild: # use incredibuild, build whole sln build_cmd = " ".join([ "BuildConsole", "%s" % proj_path, "/build", "/cfg=\"%s|Win32\"" % mode_str ]) self._run_cmd(build_cmd) else: for proj_name in win32_proj_info[self.language][LibsCompiler.KEY_VS_BUILD_TARGETS]: # build the projects self.build_win32_proj(vs_command, proj_path, proj_name, mode_str) # copy the libs into prebuilt dir for file_name in os.listdir(build_folder_path): name, ext = os.path.splitext(file_name) if ext != ".lib" and ext != ".dll": continue file_path = os.path.join(build_folder_path, file_name) shutil.copy(file_path, win32_output_dir) except Exception as e: raise e def compile_mac_ios(self): xcode_proj_info = self.cfg_info[LibsCompiler.KEY_XCODE_PROJS_INFO] if self.mode == 'debug': mode_str = 'Debug' else: mode_str = 'Release' XCODE_CMD_FMT = "xcodebuild -project \"%s\" -configuration %s -target \"%s\" %s CONFIGURATION_BUILD_DIR=%s" ios_out_dir = os.path.join(self.lib_dir, "ios") mac_out_dir = os.path.join(self.lib_dir, "mac") ios_sim_libs_dir = os.path.join(ios_out_dir, "simulator") ios_dev_libs_dir = os.path.join(ios_out_dir, "device") cocos_cmd = self._get_cocos_cmd_path() if self.language == 'cpp': build_types = ['cpp'] if self.language == 'lua': build_types = ['cpp', 'lua'] if self.language == 'js': build_types = ['cpp', 'js'] for key in build_types: proj_info = xcode_proj_info[key] proj_path = os.path.join(self.repo_x, proj_info['proj_path']) target = proj_info['targets'] if self.build_mac: # compile mac build_cmd = XCODE_CMD_FMT % (proj_path, mode_str, "%s Mac" % target, "", mac_out_dir) self._run_cmd(build_cmd) if self.build_ios: # compile ios simulator build_cmd = XCODE_CMD_FMT % (proj_path, mode_str, "%s iOS" % target, "-sdk iphonesimulator ARCHS=\"i386 x86_64\" VALID_ARCHS=\"i386 x86_64\"", ios_sim_libs_dir) self._run_cmd(build_cmd) # compile ios device build_cmd = XCODE_CMD_FMT % (proj_path, mode_str, "%s iOS" % target, "-sdk iphoneos", ios_dev_libs_dir) self._run_cmd(build_cmd) if self.build_ios: # generate fat libs for iOS for lib in os.listdir(ios_sim_libs_dir): sim_lib = os.path.join(ios_sim_libs_dir, lib) dev_lib = os.path.join(ios_dev_libs_dir, lib) output_lib = os.path.join(ios_out_dir, lib) lipo_cmd = "lipo -create -output \"%s\" \"%s\" \"%s\"" % (output_lib, sim_lib, dev_lib) self._run_cmd(lipo_cmd) # remove the simulator & device libs in iOS utils.rmdir(ios_sim_libs_dir) utils.rmdir(ios_dev_libs_dir) if not self.disable_strip: # strip the libs if self.build_ios: ios_strip_cmd = "xcrun -sdk iphoneos strip -S %s/*.a" % ios_out_dir self._run_cmd(ios_strip_cmd) if self.build_mac: mac_strip_cmd = "xcrun strip -S %s/*.a" % mac_out_dir self._run_cmd(mac_strip_cmd) def compile_android(self): # build .so for android cmd_path = self._get_cocos_cmd_path() engine_dir = self.repo_x # build the simulator project proj_path = os.path.join(engine_dir, 'tests/cpp-empty-test') if self.language == 'lua': proj_path = os.path.join(engine_dir, 'tests/lua-empty-test') elif self.language == 'js': proj_path = os.path.join(engine_dir, 'tests/js-tests') for app_abi_item in self.app_abi_list: build_cmd = "%s compile -s %s -p android --no-sign --mode %s --app-abi %s" % (cmd_path, proj_path, self.mode, 
app_abi_item) if self.android_platform is not None: build_cmd += ' --ap %s' % self.android_platform self._run_cmd(build_cmd) # copy .a to prebuilt dir ANDROID_A_PATH = "proj.android/app/build/intermediates/ndkBuild/%s/obj/local/%s" % (self.mode, app_abi_item) if self.language != 'cpp': ANDROID_A_PATH = 'project/' + ANDROID_A_PATH android_out_dir = os.path.join(self.lib_dir, "android", app_abi_item) obj_dir = os.path.join(proj_path, ANDROID_A_PATH) copy_cfg = { "from": obj_dir, "to": android_out_dir, "include": [ "*.a$" ] } adxe.copy_files_with_config(copy_cfg, obj_dir, android_out_dir) if not self.disable_strip: # strip the android libs ndk_root = os.environ["ANDROID_NDK"] if adxe.os_is_win32(): if adxe.os_is_32bit_windows(): check_bits = [ "", "-x86_64" ] else: check_bits = [ "-x86_64", "" ] sys_folder_name = "windows" for bit_str in check_bits: check_folder_name = "windows%s" % bit_str check_path = os.path.join(ndk_root, "toolchains/arm-linux-androideabi-4.9/prebuilt/%s" % check_folder_name) if os.path.isdir(check_path): sys_folder_name = check_folder_name break elif adxe.os_is_mac(): sys_folder_name = "darwin-x86_64" else: sys_folder_name = "linux-x86_64" # set strip execute file name if adxe.os_is_win32(): strip_execute_name = "strip.exe" else: strip_execute_name = "strip" # strip arm libs strip_cmd_path = os.path.join(ndk_root, "toolchains/arm-linux-androideabi-4.9/prebuilt/%s/arm-linux-androideabi/bin/%s" % (sys_folder_name, strip_execute_name)) if os.path.exists(strip_cmd_path): self.trip_libs(strip_cmd_path, os.path.join(android_out_dir, "armeabi-v7a")) # strip arm64-v8a libs strip_cmd_path = os.path.join(ndk_root, "toolchains/aarch64-linux-android-4.9/prebuilt/%s/aarch64-linux-android/bin/%s" % (sys_folder_name, strip_execute_name)) if os.path.exists(strip_cmd_path) and os.path.exists(os.path.join(android_out_dir, "arm64-v8a")): self.trip_libs(strip_cmd_path, os.path.join(android_out_dir, 'arm64-v8a')) # strip x86 libs strip_cmd_path = os.path.join(ndk_root, "toolchains/x86-4.8/prebuilt/%s/i686-linux-android/bin/%s" % (sys_folder_name, strip_execute_name)) if os.path.exists(strip_cmd_path) and os.path.exists(os.path.join(android_out_dir, "x86")): self.trip_libs(strip_cmd_path, os.path.join(android_out_dir, 'x86')) def _get_cocos_cmd_path(self): CONSOLE_PATH = "tools/console/bin" engine_dir = self.repo_x console_dir = os.path.join(engine_dir, CONSOLE_PATH) if adxe.os_is_win32(): cmd_path = os.path.join(console_dir, "adxe.bat") else: cmd_path = os.path.join(console_dir, "adxe") return cmd_path def trip_libs(self, strip_cmd, folder): if not os.path.isdir(folder): return if adxe.os_is_win32(): for name in os.listdir(folder): basename, ext = os.path.splitext(name) if ext == ".a": full_name = os.path.join(folder, name) command = "%s -S %s" % (strip_cmd, full_name) self._run_cmd(command) else: strip_cmd = "%s -S %s/*.a" % (strip_cmd, folder) self._run_cmd(strip_cmd) def modify_binary_mk(self): android_libs = os.path.join(self.lib_dir, "android") android_mks = self.cfg_info[LibsCompiler.KEY_ANDROID_MKS] for mk_file in android_mks: mk_file_path = os.path.normpath(os.path.join(self.repo_x, mk_file)) if not os.path.isfile(mk_file_path): Logging.warning(MultiLanguage.get_string('COMPILE_ERROR_GRALEW_NOT_EXIST_FMT', mk_file_path)) continue dst_file_path = os.path.join(os.path.dirname(mk_file_path), "prebuilt-mk", os.path.basename(mk_file_path)) tmp_obj = gen_prebuilt_mk.MKGenerator(mk_file_path, android_libs, dst_file_path) tmp_obj.do_generate() def clean_libs(self): utils.rmdir(self.lib_dir)
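# Illustrative sketch (not part of the plugin above; the function name and
# paths are hypothetical): the engine-path resolution performed for the '-e'
# argument in LibsCompiler.init(), extracted as a standalone function.
import os

def resolve_engine_path(arg_path, default_path):
    # None falls back to the repo-relative default computed in init()
    if arg_path is None:
        return default_path
    expanded = os.path.expanduser(arg_path)
    if os.path.isabs(expanded):
        return os.path.normpath(expanded)
    return os.path.normpath(os.path.abspath(expanded))

print(resolve_engine_path(None, '/opt/adxe'))      # -> /opt/adxe
print(resolve_engine_path('~/adxe', '/opt/adxe'))  # user-expanded + normalized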
pred_vae.py
hugochan/K-Competitive-Autoencoder-for-Text-Analytics
133
133152
'''
Created on Jan, 2017

@author: hugo

'''
from __future__ import absolute_import
from __future__ import print_function
import argparse

import numpy as np

from autoencoder.core.vae import VarAutoEncoder, load_vae_model
from autoencoder.core.deepae import DeepAutoEncoder
from autoencoder.preprocessing.preprocessing import load_corpus, doc2vec
from autoencoder.utils.op_utils import vecnorm, revdict
from autoencoder.utils.io_utils import dump_json, write_file


# def get_topics(vae, vocab, topn=10):
#     topics = []
#     weights = vae.encoder.get_weights()[0]
#     for idx in range(vae.dim):
#         token_idx = np.argsort(weights[:, idx])[::-1][:topn]
#         topics.append([vocab[x] for x in token_idx])
#     return topics

# def print_topics(topics):
#     for i in range(len(topics)):
#         str_topic = ' + '.join(['%s * %s' % (prob, token) for token, prob in topics[i]])
#         print('topic %s:' % i)
#         print(str_topic)
#         print()

def test(args):
    corpus = load_corpus(args.input)
    vocab, docs = corpus['vocab'], corpus['docs']
    n_vocab = len(vocab)

    # materialize the key list so deleting entries while iterating is safe
    # under both Python 2 and Python 3
    doc_keys = list(docs.keys())
    X_docs = []
    for k in doc_keys:
        X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0))
        del docs[k]
    X_docs = np.r_[X_docs]

    vae = load_vae_model(args.load_model)

    doc_codes = vae.predict(X_docs)
    dump_json(dict(zip(doc_keys, doc_codes.tolist())), args.output)
    print('Saved doc codes file to %s' % args.output)

    # if args.save_topics:
    #     topics = get_topics(vae, revdict(vocab), topn=10)
    #     write_file(topics, args.save_topics)
    #     print('Saved topics file to %s' % args.save_topics)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, required=True, help='path to the input corpus file')
    parser.add_argument('-o', '--output', type=str, required=True, help='path to the output doc codes file')
    parser.add_argument('-lm', '--load_model', type=str, required=True, help='path to the trained model file')
    # parser.add_argument('-st', '--save_topics', type=str, help='path to the output topics file')
    args = parser.parse_args()

    test(args)

if __name__ == '__main__':
    main()
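# Side note (standalone, illustrative): test() above relies on np.r_ to stack
# the per-document vectors into an (n_docs, n_vocab) matrix so the VAE can
# encode the whole corpus in a single predict() call.
import numpy as np

rows = [np.array([0.1, 0.9]), np.array([0.5, 0.5])]
print(np.r_[rows].shape)  # (2, 2) -- one row per document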
networkx/algorithms/community/community_generators.py
rakschahsa/networkx
445
133164
# generators.py - functions for generating graphs with community structure
#
# Copyright 2011 <NAME> <<EMAIL>>.
# Copyright 2011 <NAME> <<EMAIL>>
# Copyright 2015 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Functions for generating graphs with community structure."""
from __future__ import division

# HACK In order to accommodate both SciPy and non-SciPy implementations,
# we need to wrap the SciPy implementation of the zeta function with an
# extra parameter, `tolerance`, which will be ignored.
try:
    from scipy.special import zeta as _zeta

    def zeta(x, q, tolerance):
        return _zeta(x, q)
except ImportError:
    def zeta(x, q, tolerance):
        """The Hurwitz zeta function, or the Riemann zeta function of two
        arguments.

        ``x`` must be greater than one and ``q`` must be positive.

        This function repeatedly computes subsequent partial sums until
        convergence, as decided by ``tolerance``.

        """
        z = 0
        z_prev = -float('inf')
        k = 0
        while abs(z - z_prev) > tolerance:
            z_prev = z
            z += 1 / ((k + q) ** x)
            k += 1
        return z

import networkx as nx
from networkx.utils import py_random_state

__all__ = ['LFR_benchmark_graph']


def _zipf_rv_below(gamma, xmin, threshold, seed):
    """Returns a random value chosen from the Zipf distribution,
    guaranteed to be less than or equal to the value ``threshold``.

    Repeatedly draws values from the Zipf distribution until the
    threshold is met, then returns that value.

    """
    result = nx.utils.zipf_rv(gamma, xmin, seed)
    while result > threshold:
        result = nx.utils.zipf_rv(gamma, xmin, seed)
    return result


def _powerlaw_sequence(gamma, low, high, condition, length, max_iters, seed):
    """Returns a list of numbers obeying a power law distribution, with
    some additional restrictions.

    ``gamma`` and ``low`` are the parameters for the Zipf distribution.

    ``high`` is the maximum allowed value for values drawn from the Zipf
    distribution. For more information, see :func:`_zipf_rv_below`.

    ``condition`` and ``length`` are Boolean-valued functions on
    lists. While generating the list, random values are drawn and
    appended to the list until ``length`` is satisfied by the created
    list. Once ``condition`` is satisfied, the sequence generated in
    this way is returned.

    ``max_iters`` indicates the number of times to generate a list
    satisfying ``length``. If the number of iterations exceeds this
    value, :exc:`~networkx.exception.ExceededMaxIterations` is raised.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    """
    for i in range(max_iters):
        seq = []
        while not length(seq):
            seq.append(_zipf_rv_below(gamma, low, high, seed))
        if condition(seq):
            return seq
    raise nx.ExceededMaxIterations("Could not create power law sequence")


# TODO Needs documentation.
def _generate_min_degree(gamma, average_degree, max_degree, tolerance, max_iters):
    """Returns a minimum degree from the given average degree."""
    min_deg_top = max_degree
    min_deg_bot = 1
    min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
    itrs = 0
    mid_avg_deg = 0
    while abs(mid_avg_deg - average_degree) > tolerance:
        if itrs > max_iters:
            raise nx.ExceededMaxIterations("Could not match average_degree")
        mid_avg_deg = 0
        for x in range(int(min_deg_mid), max_degree + 1):
            mid_avg_deg += (x ** (-gamma + 1)) / zeta(gamma, min_deg_mid, tolerance)
        if mid_avg_deg > average_degree:
            min_deg_top = min_deg_mid
            min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
        else:
            min_deg_bot = min_deg_mid
            min_deg_mid = (min_deg_top - min_deg_bot) / 2 + min_deg_bot
        itrs += 1
    # return int(min_deg_mid + 0.5)
    return round(min_deg_mid)


def _generate_communities(degree_seq, community_sizes, mu, max_iters, seed):
    """Returns a list of sets, each of which represents a community in
    the graph.

    ``degree_seq`` is the degree sequence that must be met by the
    graph.

    ``community_sizes`` is the community size distribution that must be
    met by the generated list of sets.

    ``mu`` is a float in the interval [0, 1] indicating the fraction of
    inter-community edges incident to each node.

    ``max_iters`` is the number of times to try to add a node to a
    community. This must be greater than the length of
    ``degree_seq``, otherwise this function will always fail. If the
    number of iterations exceeds this value,
    :exc:`~networkx.exception.ExceededMaxIterations` is raised.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    The communities returned by this are sets of integers in the set {0,
    ..., *n* - 1}, where *n* is the length of ``degree_seq``.

    """
    # This assumes the nodes in the graph will be natural numbers.
    result = [set() for _ in community_sizes]
    n = len(degree_seq)
    free = list(range(n))
    for i in range(max_iters):
        v = free.pop()
        c = seed.choice(range(len(community_sizes)))
        # s = int(degree_seq[v] * (1 - mu) + 0.5)
        s = round(degree_seq[v] * (1 - mu))
        # If the community is large enough, add the node to the chosen
        # community. Otherwise, return it to the list of unaffiliated
        # nodes.
        if s < community_sizes[c]:
            result[c].add(v)
        else:
            free.append(v)
        # If the community is too big, remove a node from it.
        if len(result[c]) > community_sizes[c]:
            free.append(result[c].pop())
        if not free:
            return result
    msg = 'Could not assign communities; try increasing min_community'
    raise nx.ExceededMaxIterations(msg)


@py_random_state(11)
def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None,
                        min_degree=None, max_degree=None, min_community=None,
                        max_community=None, tol=1.0e-7, max_iters=500,
                        seed=None):
    r"""Returns the LFR benchmark graph for testing community-finding
    algorithms.

    This algorithm proceeds as follows:

    1) Find a degree sequence with a power law distribution, and minimum
       value ``min_degree``, which has approximate average degree
       ``average_degree``. This is accomplished by either

       a) specifying ``min_degree`` and not ``average_degree``,
       b) specifying ``average_degree`` and not ``min_degree``, in which
          case a suitable minimum degree will be found.

       ``max_degree`` can also be specified, otherwise it will be set to
       ``n``. Each node *u* will have `\mu \mathrm{deg}(u)` edges
       joining it to nodes in communities other than its own and `(1 -
       \mu) \mathrm{deg}(u)` edges joining it to nodes in its own
       community.
    2) Generate community sizes according to a power law distribution
       with exponent ``tau2``. If ``min_community`` and
       ``max_community`` are not specified they will be selected to be
       ``min_degree`` and ``max_degree``, respectively. Community sizes
       are generated until the sum of their sizes equals ``n``.

    3) Each node will be randomly assigned a community with the
       condition that the community is large enough for the node's
       intra-community degree, `(1 - \mu) \mathrm{deg}(u)`, as described
       in step 1. If a community grows too large, a random node will be
       selected for reassignment to a new community, until all nodes
       have been assigned a community.

    4) Each node *u* then adds `(1 - \mu) \mathrm{deg}(u)`
       intra-community edges and `\mu \mathrm{deg}(u)` inter-community
       edges.

    Parameters
    ----------
    n : int
        Number of nodes in the created graph.

    tau1 : float
        Power law exponent for the degree distribution of the created
        graph. This value must be strictly greater than one.

    tau2 : float
        Power law exponent for the community size distribution in the
        created graph. This value must be strictly greater than one.

    mu : float
        Fraction of inter-community edges incident to each node. This
        value must be in the interval [0, 1].

    average_degree : float
        Desired average degree of nodes in the created graph. This value
        must be in the interval [0, *n*]. Exactly one of this and
        ``min_degree`` must be specified, otherwise a
        :exc:`NetworkXError` is raised.

    min_degree : int
        Minimum degree of nodes in the created graph. This value must be
        in the interval [0, *n*]. Exactly one of this and
        ``average_degree`` must be specified, otherwise a
        :exc:`NetworkXError` is raised.

    max_degree : int
        Maximum degree of nodes in the created graph. If not specified,
        this is set to ``n``, the total number of nodes in the graph.

    min_community : int
        Minimum size of communities in the graph. If not specified, this
        is set to ``min_degree``.

    max_community : int
        Maximum size of communities in the graph. If not specified, this
        is set to ``n``, the total number of nodes in the graph.

    tol : float
        Tolerance when comparing floats, specifically when comparing
        average degree values.

    max_iters : int
        Maximum number of iterations to try to create the community
        sizes, degree distribution, and community affiliations.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    G : NetworkX graph
        The LFR benchmark graph generated according to the specified
        parameters.

        Each node in the graph has a node attribute ``'community'`` that
        stores the community (that is, the set of nodes) that includes
        it.

    Raises
    ------
    NetworkXError
        If any of the parameters do not meet their upper and lower bounds:

        - ``tau1`` and ``tau2`` must be strictly greater than 1.
        - ``mu`` must be in [0, 1].
        - ``max_degree`` must be in {1, ..., *n*}.
        - ``min_community`` and ``max_community`` must be in {0, ..., *n*}.

        If not exactly one of ``average_degree`` and ``min_degree`` is
        specified.

        If ``min_degree`` is not specified and a suitable ``min_degree``
        cannot be found.

    ExceededMaxIterations
        If a valid degree sequence cannot be created within
        ``max_iters`` number of iterations.

        If a valid set of community sizes cannot be created within
        ``max_iters`` number of iterations.

        If a valid community assignment cannot be created within ``10 *
        n * max_iters`` number of iterations.
Examples -------- Basic usage:: >>> from networkx.algorithms.community import LFR_benchmark_graph >>> n = 250 >>> tau1 = 3 >>> tau2 = 1.5 >>> mu = 0.1 >>> G = LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=5, ... min_community=20, seed=10) Continuing the example above, you can get the communities from the node attributes of the graph:: >>> communities = {frozenset(G.nodes[v]['community']) for v in G} Notes ----- This algorithm differs slightly from the original way it was presented in [1]. 1) Rather than connecting the graph via a configuration model then rewiring to match the intra-community and inter-community degrees, we do this wiring explicitly at the end, which should be equivalent. 2) The code posted on the author's website [2] calculates the random power law distributed variables and their average using continuous approximations, whereas we use the discrete distributions here as both degree and community size are discrete. Though the authors describe the algorithm as quite robust, testing during development indicates that a somewhat narrower parameter set is likely to successfully produce a graph. Some suggestions have been provided in the event of exceptions. References ---------- .. [1] "Benchmark graphs for testing community detection algorithms", <NAME>, <NAME>, and <NAME>, Phys. Rev. E 78, 046110 2008 .. [2] http://santo.fortunato.googlepages.com/inthepress2 """ # Perform some basic parameter validation. if not tau1 > 1: raise nx.NetworkXError("tau1 must be greater than one") if not tau2 > 1: raise nx.NetworkXError("tau2 must be greater than one") if not 0 <= mu <= 1: raise nx.NetworkXError("mu must be in the interval [0, 1]") # Validate parameters for generating the degree sequence. if max_degree is None: max_degree = n elif not 0 < max_degree <= n: raise nx.NetworkXError("max_degree must be in the interval (0, n]") if not ((min_degree is None) ^ (average_degree is None)): raise nx.NetworkXError("Must assign exactly one of min_degree and" " average_degree") if min_degree is None: min_degree = _generate_min_degree(tau1, average_degree, max_degree, tol, max_iters) # Generate a degree sequence with a power law distribution. low, high = min_degree, max_degree def condition(seq): return sum(seq) % 2 == 0 def length(seq): return len(seq) >= n deg_seq = _powerlaw_sequence(tau1, low, high, condition, length, max_iters, seed) # Validate parameters for generating the community size sequence. if min_community is None: min_community = min(deg_seq) if max_community is None: max_community = max(deg_seq) # Generate a community size sequence with a power law distribution. # # TODO The original code incremented the number of iterations each # time a new Zipf random value was drawn from the distribution. This # differed from the way the number of iterations was incremented in # `_powerlaw_degree_sequence`, so this code was changed to match # that one. As a result, this code is allowed many more chances to # generate a valid community size sequence. low, high = min_community, max_community def condition(seq): return sum(seq) == n def length(seq): return sum(seq) >= n comms = _powerlaw_sequence(tau2, low, high, condition, length, max_iters, seed) # Generate the communities based on the given degree sequence and # community sizes. max_iters *= 10 * n communities = _generate_communities(deg_seq, comms, mu, max_iters, seed) # Finally, generate the benchmark graph based on the given # communities, joining nodes according to the intra- and # inter-community degrees. 
G = nx.Graph() G.add_nodes_from(range(n)) for c in communities: for u in c: while G.degree(u) < round(deg_seq[u] * (1 - mu)): v = seed.choice(list(c)) G.add_edge(u, v) while G.degree(u) < deg_seq[u]: v = seed.choice(range(n)) if v not in c: G.add_edge(u, v) G.nodes[u]['community'] = c return G
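# Standalone check (illustrative): the pure-Python Hurwitz zeta fallback
# defined at the top of this module, exercised directly. With q = 1 it
# reduces to the Riemann zeta function.
def _zeta_partial_sum(x, q, tolerance):
    z, z_prev, k = 0.0, -float('inf'), 0
    while abs(z - z_prev) > tolerance:
        z_prev = z
        z += 1 / ((k + q) ** x)
        k += 1
    return z

print(_zeta_partial_sum(2.5, 1, 1e-10))  # ~1.3414..., i.e. zeta(2.5)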
thrift/lib/py3lite/test/structs.py
sakibguy/fbthrift
2,112
133205
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import math import unittest from testing.lite_types import ( Color, Integers, File, OptionalFile, Kind, Nested1, Nested2, Nested3, Optionals, Reserved, Runtime, UnusedError, easy, numerical, ) from thrift.py3lite.serializer import ( deserialize, serialize_iobuf, ) from thrift.py3lite.types import ( isset, update_nested_field, ) class StructTests(unittest.TestCase): def test_isset_Struct(self) -> None: to_serialize = OptionalFile(name="/dev/null", type=8) serialized = serialize_iobuf(to_serialize) file = deserialize(File, serialized) self.assertTrue(isset(file)["type"]) self.assertFalse(isset(file)["permissions"]) to_serialize = OptionalFile(name="/dev/null") serialized = serialize_iobuf(to_serialize) file = deserialize(File, serialized) self.assertEqual(file.type, Kind.REGULAR) self.assertFalse(isset(file)["type"]) def test_isset_Union(self) -> None: i = Integers(large=2) with self.assertRaises(TypeError): # pyre-ignore[6]: for test isset(i)["large"] def test_isset_Error(self) -> None: e = UnusedError(message="ACK") self.assertTrue(isset(e)["message"]) def test_copy(self) -> None: x = easy(val=1, an_int=Integers(small=300), name="foo", val_list=[1, 2, 3, 4]) dif_list = copy.copy(x.val_list) self.assertEqual(x.val_list, dif_list) dif_int = copy.copy(x.an_int) self.assertEqual(x.an_int, dif_int) def test_hashability(self) -> None: hash(easy()) def test_optional_struct_creation(self) -> None: with self.assertRaises(TypeError): # pyre-ignore[19]: for test easy(1, [1, 1], "test", Integers(tiny=1)) easy(val=1, an_int=Integers(small=500)) with self.assertRaises(TypeError): # pyre-ignore[6]: for test easy(name=b"binary") # Only Required Fields don't accept None easy(val=5, an_int=None) def test_call_replace(self) -> None: x = easy(val=1, an_int=Integers(small=300), name="foo") y = x(name="bar") self.assertNotEqual(x.name, y.name) z = y(an_int=None, val=4) self.assertNotEqual(x.an_int, z.an_int) self.assertNotEqual(x.val, z.val) self.assertIsNone(z.an_int.value) self.assertEqual(y.val, x.val) self.assertEqual(y.an_int, x.an_int) x = easy() self.assertIsNotNone(x.val) self.assertIsNotNone(x.val_list) self.assertIsNone(x.name) self.assertIsNotNone(x.an_int) def test_call_replace_container(self) -> None: x = Optionals(values=["a", "b", "c"]) z = x(values=["b", "c"]) y = z(values=None) self.assertIsNone(y.values) def test_runtime_checks(self) -> None: x = Runtime() with self.assertRaises(TypeError): # pyre-ignore[6]: for test x(bool_val=5) with self.assertRaises(TypeError): # pyre-ignore[6]: for test Runtime(bool_val=5) with self.assertRaises(TypeError): # pyre-ignore[6]: for test x(enum_val=2) with self.assertRaises(TypeError): # pyre-ignore[6]: for test Runtime(enum_val=2) with self.assertRaises(TypeError): # pyre-ignore[6]: for test x(int_list_val=["foo", "bar", "baz"]) with self.assertRaises(TypeError): # pyre-ignore[6]: for test Runtime(int_list_val=["foo", "bar", "baz"]) def test_reserved(self) -> None: 
x = Reserved( from_="hello", nonlocal_=3, ok="bye", is_cpdef=True, move="Qh4xe1", inst="foo", changes="bar", ) self.assertEqual(x.from_, "hello") self.assertEqual(x.nonlocal_, 3) self.assertEqual(x.ok, "bye") self.assertEqual(x.is_cpdef, True) self.assertEqual(x.move, "Qh4xe1") self.assertEqual(x.inst, "foo") self.assertEqual(x.changes, "bar") def test_ordering(self) -> None: x = Runtime(bool_val=False, enum_val=Color.red, int_list_val=[64, 128]) y = x(bool_val=True) self.assertLess(x, y) self.assertLessEqual(x, y) self.assertGreater(y, x) self.assertGreaterEqual(y, x) self.assertEqual([x, y], sorted([y, x])) def test_init_with_invalid_field(self) -> None: with self.assertRaises(TypeError): # pyre-ignore[28]: for test easy(val=1, an_int=Integers(small=300), name="foo", val_lists=[1, 2, 3, 4]) def test_iterate(self) -> None: x = Reserved( from_="hello", nonlocal_=3, ok="bye", is_cpdef=True, move="Qh4xe1", inst="foo", changes="bar", ) self.assertEqual( list(x), [ ("from_", "hello"), ("nonlocal_", 3), ("ok", "bye"), ("is_cpdef", True), ("move", "Qh4xe1"), ("inst", "foo"), ("changes", "bar"), ], ) def test_update_nested_fields(self) -> None: n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo")))) n = update_nested_field(n, {"a.b.c": easy(val=128)}) self.assertEqual(n.a.b.c.val, 128) def test_update_multiple_nested_fields(self) -> None: n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo")))) n = update_nested_field( n, { "a.b.c.name": "bar", "a.b.c.val": 256, }, ) self.assertEqual(n.a.b.c.name, "bar") self.assertEqual(n.a.b.c.val, 256) def test_update_invalid_nested_fields(self) -> None: n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo")))) with self.assertRaises(ValueError): update_nested_field(n, {"": 0}) with self.assertRaises(ValueError): update_nested_field(n, {"e": 0}) with self.assertRaises(ValueError): update_nested_field(n, {"a.b.e": 0}) with self.assertRaises(ValueError): update_nested_field(n, {"a.e.f": 0}) def test_update_conflicting_nested_fields(self) -> None: n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo")))) with self.assertRaises(ValueError): n = update_nested_field( n, { "a.b.c": easy(val=128), "a.b.c.val": 256, }, ) class NumericalConversionsTests(unittest.TestCase): def test_overflow(self) -> None: with self.assertRaises(OverflowError): numerical(float_val=5, int_val=2 ** 63 - 1) with self.assertRaises(OverflowError): numerical(float_val=5, int_val=2, int_list=[5, 2 ** 32]) def test_int_to_float(self) -> None: x = numerical(int_val=5, float_val=5, float_list=[1, 5, 6]) x(float_val=10) x(float_list=[6, 7, 8]) def test_int_to_i64(self) -> None: large = 2 ** 63 - 1 numerical(int_val=5, float_val=5, i64_val=int(large)) too_large = 2 ** 65 - 1 with self.assertRaises(OverflowError): numerical(int_val=5, float_val=5, i64_val=int(too_large)) def test_float_to_int_required_field(self) -> None: with self.assertRaises(TypeError): # pyre-ignore[6]: for test numerical(int_val=math.pi, float_val=math.pi) def test_float_to_int_unqualified_field(self) -> None: with self.assertRaises(TypeError): numerical( float_val=math.pi, # pyre-ignore[6]: for test int_val=math.pi, ) def test_float_to_int_list(self) -> None: with self.assertRaises(TypeError): numerical( int_val=5, float_val=math.pi, # pyre-ignore[6]: for test int_list=[math.pi, math.e], )
examples/mnist_branch.py
rsketine/neon
4,415
133211
#!/usr/bin/env python # ****************************************************************************** # Copyright 2014-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ****************************************************************************** """ Example that trains a small multi-layer perceptron with multiple branches on MNIST data. Branch nodes are used to indicate points at which different layer sequences diverge The topology of the network is: cost1 cost3 | / m_l4 b2_l2 | / | ___b2_l1 |/ m_l3 cost2 | / m_l2 b1_l2 | / | ___b1_l1 |/ | m_l1 | | data Usage: python examples/mnist_branch.py """ from neon import logger as neon_logger from neon.callbacks.callbacks import Callbacks from neon.data import MNIST from neon.initializers import Gaussian from neon.layers import GeneralizedCost, Affine, BranchNode, Multicost, SingleOutputTree from neon.models import Model from neon.optimizers import GradientDescentMomentum from neon.transforms import Rectlin, Logistic, Softmax from neon.transforms import CrossEntropyBinary, CrossEntropyMulti, Misclassification from neon.util.argparser import NeonArgparser # parse the command line arguments parser = NeonArgparser(__doc__) args = parser.parse_args() # load up the mnist data set dataset = MNIST(path=args.data_dir) train_set = dataset.train_iter valid_set = dataset.valid_iter # setup weight initialization function init_norm = Gaussian(loc=0.0, scale=0.01) normrelu = dict(init=init_norm, activation=Rectlin()) normsigm = dict(init=init_norm, activation=Logistic(shortcut=True)) normsoft = dict(init=init_norm, activation=Softmax()) # setup model layers b1 = BranchNode(name="b1") b2 = BranchNode(name="b2") p1 = [Affine(nout=100, name="m_l1", **normrelu), b1, Affine(nout=32, name="m_l2", **normrelu), Affine(nout=16, name="m_l3", **normrelu), b2, Affine(nout=10, name="m_l4", **normsoft)] p2 = [b1, Affine(nout=16, name="b1_l1", **normrelu), Affine(nout=10, name="b1_l2", **normsigm)] p3 = [b2, Affine(nout=16, name="b2_l1", **normrelu), Affine(nout=10, name="b2_l2", **normsigm)] # setup cost function as CrossEntropy cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()), GeneralizedCost(costfunc=CrossEntropyBinary()), GeneralizedCost(costfunc=CrossEntropyBinary())], weights=[1, 0., 0.]) # setup optimizer optimizer = GradientDescentMomentum( 0.1, momentum_coef=0.9, stochastic_round=args.rounding) # initialize model object alphas = [1, 0.25, 0.25] mlp = Model(layers=SingleOutputTree([p1, p2, p3], alphas=alphas)) # setup standard fit callbacks callbacks = Callbacks(mlp, eval_set=valid_set, multicost=True, **args.callback_args) # run fit mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks) # TODO: introduce Multicost metric support. The line below currently fails # since the Misclassification metric expects a single Tensor not a list of # Tensors neon_logger.display('Misclassification error = %.1f%%' % (mlp.eval(valid_set, metric=Misclassification()) * 100))
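# Note (runs in the context of this script): the same BranchNode instance
# appears in both the trunk and its branch, which is how SingleOutputTree
# knows where each branch forks off the main sequence.
assert p1[1] is p2[0] is b1
assert p1[4] is p3[0] is b2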
plugins/stock.py
daviddever/skybot
114
133221
#!/usr/bin/env python3

from __future__ import division, unicode_literals, print_function
from past.utils import old_div

import re

from util import hook, http


def human_price(x):
    if x > 1e9:
        return "{:,.2f}B".format(old_div(x, 1e9))
    elif x > 1e6:
        return "{:,.2f}M".format(old_div(x, 1e6))
    return "{:,.0f}".format(x)


@hook.api_key("iexcloud")
@hook.command
def stock(inp, api_key=None):
    """.stock <symbol> -- retrieves a week's worth of stats for the given symbol."""
    if not api_key:
        return "missing api key"

    arguments = inp.split(" ")
    symbol = arguments[0].upper()

    try:
        quote = http.get_json(
            "https://cloud.iexapis.com/stable/stock/{symbol}/quote".format(
                symbol=symbol
            ),
            token=api_key,
        )
    except http.HTTPError:
        return "{} is not a valid stock symbol.".format(symbol)

    if (
        quote["extendedPriceTime"]
        and quote["latestUpdate"] < quote["extendedPriceTime"]
    ):
        price = quote["extendedPrice"]
        change = quote["extendedChange"]
    elif quote["latestSource"] == "Close" and quote.get("iexRealtimePrice"):
        # NASDAQ stocks don't have extendedPrice anymore :(
        price = quote["iexRealtimePrice"]
        change = price - quote["previousClose"]
    else:
        price = quote["latestPrice"]
        change = quote["change"]

    def maybe(name, key, fmt=human_price):
        if quote.get(key):
            return " | {0}: {1}".format(name, fmt(float(quote[key])))
        return ""

    response = {
        "name": quote["companyName"],
        "change": change,
        "percent_change": 100 * change / (price - change),
        "symbol": quote["symbol"],
        "price": price,
        "color": "05" if change < 0 else "03",
        "high": quote["high"],
        "low": quote["low"],
        "average_volume": maybe("Volume", "latestVolume"),
        "market_cap": maybe("MCAP", "marketCap"),
        "pe_ratio": maybe("P/E", "peRatio", fmt="{:.2f}".format),
    }

    return (
        "{name} ({symbol}) ${price:,.2f} \x03{color}{change:,.2f} ({percent_change:,.2f}%)\x03"
        + (
            " | Day Range: ${low:,.2f} - ${high:,.2f}"
            if response["high"] and response["low"]
            else ""
        )
        + "{pe_ratio}{average_volume}{market_cap}"
    ).format(**response)


if __name__ == "__main__":
    import os, sys

    for arg in sys.argv[1:]:
        print(stock(arg, api_key=os.getenv("KEY")))
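# Worked example (illustrative): percent_change above is computed against the
# prior price, i.e. (price - change), not the current price.
price, change = 105.0, 5.0
print(100 * change / (price - change))  # 5.0 (%) -- the prior price was 100.0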
Two Pointers/349. Intersection of Two Arrays.py
beckswu/Leetcode
138
133227
""" 349. Intersection of Two Arrays Example: Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2]. """ # binary search class Solution(object): def intersection(self, nums1, nums2): """ :type nums1: List[int] :type nums2: List[int] :rtype: List[int] """ nums1.sort() nums2.sort() res = [] left = 0 for i in nums1: left = bisect.bisect_left(nums2,i,lo=left) if left< len(nums2) and nums2[left] == i: res.append(i) left = bisect.bisect_right(nums2,i,lo=left) return res class Solution(object): def intersection(self, nums1, nums2): return list(set(nums1) & set(nums2)) # Two pointer class Solution(object): def intersection(self, nums1, nums2): nums1.sort() nums2.sort() res = [] l, r = 0, 0 while l<len(nums1) and r < len(nums2): if nums1[l] == nums2[r]: if not res or res[-1]!=nums1[l]: res+=[nums1[l]] l+=1 r+=1 elif nums1[l] < nums2[r]: l+=1 else: r+=1 return res
coveralls/cli.py
styleseat/coveralls-python
191
133231
""" Publish coverage results online via coveralls.io. Puts your coverage results on coveralls.io for everyone to see. This tool makes custom reports for data generated by coverage.py package and sends it to the coveralls.io service API. All Python files in your coverage analysis are posted to this service along with coverage stats, so please make sure you're not ruining your own security! Usage: coveralls [options] coveralls debug [options] Debug mode doesn't send anything, just outputs json to stdout. It also forces verbose output. Please use debug mode when submitting bug reports. Global options: --service=<name> Provide an alternative service name to submit. --rcfile=<file> Specify configuration file. [default: .coveragerc] --basedir=<dir> Base directory that is removed from reported paths. --output=<file> Write report to file. Doesn't send anything. --srcdir=<dir> Source directory added to reported paths. --submit=<file> Upload a previously generated file. --merge=<file> Merge report from file when submitting. --finish Finish parallel jobs. -h --help Display this help. -v --verbose Print extra info, always enabled when debugging. Example: ------- $ coveralls Submitting coverage to coveralls.io... Coverage submitted! Job #38.1 https://coveralls.io/jobs/92059 """ import logging import sys import docopt from .api import Coveralls from .exception import CoverallsException from .version import __version__ log = logging.getLogger('coveralls') def main(argv=None): options = docopt.docopt(__doc__, argv=argv, version=__version__) if options['debug']: options['--verbose'] = True level = logging.DEBUG if options['--verbose'] else logging.INFO log.addHandler(logging.StreamHandler()) log.setLevel(level) token_required = not options['debug'] and not options['--output'] try: coverallz = Coveralls(token_required, config_file=options['--rcfile'], service_name=options['--service'], base_dir=options.get('--basedir') or '', src_dir=options.get('--srcdir') or '') if options['--merge']: coverallz.merge(options['--merge']) if options['debug']: log.info('Testing coveralls-python...') coverallz.wear(dry_run=True) return if options['--output']: log.info('Write coverage report to file...') coverallz.save_report(options['--output']) return if options['--submit']: with open(options['--submit']) as report_file: coverallz.submit_report(report_file.read()) return if options['--finish']: log.info('Finishing parallel jobs...') coverallz.parallel_finish() log.info('Done') return log.info('Submitting coverage to coveralls.io...') result = coverallz.wear() log.info('Coverage submitted!') log.debug(result) if result: log.info(result.get('message')) log.info(result.get('url')) except KeyboardInterrupt: # pragma: no cover log.info('Aborted') except CoverallsException as e: log.exception(e) sys.exit(1)
mpf/tests/test_BallHold.py
Scottacus64/mpf
163
133248
from mpf.tests.MpfTestCase import MpfTestCase
from unittest.mock import MagicMock


class TestBallHold(MpfTestCase):

    def get_config_file(self):
        return 'test_ball_holds.yaml'

    def get_machine_path(self):
        return 'tests/machine_files/ball_holds/'

    def _missing_ball(self, **kwargs):
        del kwargs
        self._missing += 1

    def _ball_enter(self, new_balls, unclaimed_balls, **kwargs):
        del unclaimed_balls
        del kwargs
        self._enter += new_balls

    def _captured_from_pf(self, balls, **kwargs):
        del kwargs
        self._captured += balls

    def _collecting_balls_complete_handler(self, **kwargs):
        del kwargs
        self._collecting_balls_complete = 1

    def test_ball_hold_in_mode(self):
        # start mode
        self.post_event("start_mode1")

        # mode loaded. ball_hold2 should be enabled
        self.assertTrue(self.machine.ball_holds["hold_test2"].enabled)

        # stop mode
        self.post_event("stop_mode1")

        # mode stopped. should ball_hold be disabled
        self.assertFalse(self.machine.ball_holds["hold_test2"].enabled)

        # start mode (again)
        self.post_event("start_mode1")

        # mode loaded. ball_hold2 should be enabled
        self.assertTrue(self.machine.ball_holds["hold_test2"].enabled)

        # stop mode
        self.post_event("stop_mode1")

        # mode stopped. should ball_hold be disabled
        self.assertFalse(self.machine.ball_holds["hold_test2"].enabled)

    def test_hold_and_release_at_game_end(self):
        coil1 = self.machine.coils['eject_coil1']
        coil2 = self.machine.coils['eject_coil2']
        coil3 = self.machine.coils['eject_coil3']
        trough = self.machine.ball_devices['test_trough']
        launcher = self.machine.ball_devices['test_launcher']
        hold = self.machine.ball_devices['test_hold']
        hold_logic = self.machine.ball_holds['hold_test']
        playfield = self.machine.ball_devices['playfield']

        self.machine.events.add_handler('balldevice_captured_from_playfield', self._captured_from_pf)
        self.machine.events.add_handler('balldevice_ball_missing', self._missing_ball)

        self._enter = 0
        self._captured = 0
        self._missing = 0

        # add an initial ball to trough
        self.machine.switch_controller.process_switch("s_ball_switch1", 1)
        self.machine.switch_controller.process_switch("s_ball_switch2", 1)
        self.advance_time_and_run(1)
        self.assertEqual(2, self._captured)
        self._captured = 0
        self.assertEqual(0, playfield.balls)
        self.assertEqual(2, self.machine.ball_controller.num_balls_known)

        # it should keep the ball
        coil1.pulse = MagicMock()
        coil2.pulse = MagicMock()
        coil3.pulse = MagicMock()
        self.assertEqual(2, trough.balls)
        assert not coil1.pulse.called
        assert not coil2.pulse.called
        assert not coil3.pulse.called

        # start a game
        self.machine.switch_controller.process_switch("s_start", 1)
        self.advance_time_and_run(0.1)
        self.machine.switch_controller.process_switch("s_start", 0)
        self.advance_time_and_run(1)

        # trough ejects
        self.assertTrue(coil1.pulse.called)
        assert not coil2.pulse.called
        assert not coil3.pulse.called

        self.machine.switch_controller.process_switch("s_ball_switch1", 0)
        self.advance_time_and_run(1)
        self.assertEqual(1, trough.balls)

        # launcher receives and ejects
        self.machine.switch_controller.process_switch("s_ball_switch_launcher", 1)
        self.advance_time_and_run(1)
        self.assertEqual(1, launcher.balls)

        self.assertTrue(coil1.pulse.called)
        self.assertTrue(coil2.pulse.called)
        assert not coil3.pulse.called

        # launcher shoots the ball
        self.machine.switch_controller.process_switch("s_ball_switch_launcher", 0)
        self.advance_time_and_run(1)
        self.assertEqual(0, launcher.balls)

        self.machine.switch_controller.process_switch("s_playfield_active", 1)
        self.advance_time_and_run(0.1)
self.machine.switch_controller.process_switch("s_playfield_active", 0) self.advance_time_and_run(1) self.assertEqual(1, playfield.balls) self.assertEqual(0, self._captured) self.assertEqual(0, self._missing) self.mock_event("yes") self.mock_event("no") self.post_event("test_conditional_event") self.assertEventCalled("no") self.assertEventNotCalled("yes") coil1.pulse = MagicMock() coil2.pulse = MagicMock() coil3.pulse = MagicMock() # ball enters hold self.machine.switch_controller.process_switch("s_ball_switch_hold1", 1) self.advance_time_and_run(1) self.assertEqual(1, hold.balls) self.mock_event("yes") self.mock_event("no") self.post_event("test_conditional_event") self.assertEventNotCalled("no") self.assertEventCalled("yes") # request another ball self.machine.playfield.add_ball(1) self.advance_time_and_run() self.assertTrue(coil1.pulse.called) assert not coil2.pulse.called assert not coil3.pulse.called self.assertEqual(0, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self.assertEqual(1, hold_logic.balls_held) self._captured = 0 self.machine.switch_controller.process_switch("s_ball_switch2", 0) self.advance_time_and_run(1) self.assertEqual(0, trough.balls) # launcher receives and ejects self.machine.switch_controller.process_switch("s_ball_switch_launcher", 1) self.advance_time_and_run(1) self.assertEqual(1, launcher.balls) self.assertTrue(coil1.pulse.called) self.assertTrue(coil2.pulse.called) assert not coil3.pulse.called # launcher shoots the ball self.machine.switch_controller.process_switch("s_ball_switch_launcher", 0) self.advance_time_and_run(1) self.assertEqual(0, launcher.balls) self.machine.switch_controller.process_switch("s_playfield_active", 1) self.advance_time_and_run(0.1) self.machine.switch_controller.process_switch("s_playfield_active", 0) self.advance_time_and_run(1) self.assertEqual(1, playfield.balls) self.assertEqual(0, self._captured) self.assertEqual(0, self._missing) # ball drains self.machine.switch_controller.process_switch("s_ball_switch1", 1) self.advance_time_and_run(1) self.assertEqual(0, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self._captured = 0 self.assertEqual(2, self.machine.ball_controller.num_balls_known) # hold should eject all balls self.assertTrue(coil1.pulse.called) self.assertTrue(coil2.pulse.called) self.assertTrue(coil3.pulse.called) self.machine.switch_controller.process_switch("s_ball_switch_hold1", 0) self.advance_time_and_run(1) self.assertEqual(0, hold.balls) self.assertEqual(0, hold_logic.balls_held) self.assertEqual(0, self._captured) self.assertEqual(0, self._missing) # game did not end because ball has not drained self.assertIsNotNone(self.machine.game) # ball also drains self.machine.switch_controller.process_switch("s_ball_switch2", 1) self.advance_time_and_run(1) # game ends self.assertIsNone(self.machine.game) self.assertEqual(0, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self.assertEqual(2, self.machine.ball_controller.num_balls_known) self.advance_time_and_run(100) self.assertEqual(0, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self.assertEqual(2, self.machine.ball_controller.num_balls_known) def test_hold_full_and_release(self): coil1 = self.machine.coils['eject_coil1'] coil2 = self.machine.coils['eject_coil2'] coil3 = self.machine.coils['eject_coil3'] trough = self.machine.ball_devices['test_trough'] hold = self.machine.ball_devices['test_hold'] 
hold_logic = self.machine.ball_holds['hold_test'] playfield = self.machine.ball_devices['playfield'] self.machine.events.add_handler('balldevice_captured_from_playfield', self._captured_from_pf) self.machine.events.add_handler('balldevice_ball_missing', self._missing_ball) self.machine.events.add_handler('collecting_balls_complete', self._collecting_balls_complete_handler) hold_logic.enable() self._enter = 0 self._captured = 0 self._missing = 0 self._collecting_balls_complete = 0 # add an initial ball to trough self.machine.switch_controller.process_switch("s_ball_switch1", 1) self.machine.switch_controller.process_switch("s_ball_switch2", 1) self.advance_time_and_run(1) self.assertEqual(2, self._captured) self._captured = 0 self.assertEqual(0, playfield.balls) self.assertEqual(2, self.machine.ball_controller.num_balls_known) # it should keep the ball coil1.pulse = MagicMock() coil2.pulse = MagicMock() coil3.pulse = MagicMock() self.assertEqual(2, trough.balls) assert not coil1.pulse.called assert not coil2.pulse.called assert not coil3.pulse.called self.assertFalse(hold_logic.is_full()) self.assertEqual(2, trough.available_balls) # hold captures a first random ball self.machine.switch_controller.process_switch("s_ball_switch_hold1", 1) self.advance_time_and_run(1) assert not coil3.pulse.called self.assertFalse(hold_logic.is_full()) # self.assertEqual(1, trough.available_balls) self.assertEqual(1, hold.available_balls) # hold captures a second random ball self.machine.switch_controller.process_switch("s_ball_switch_hold2", 1) self.advance_time_and_run(1) assert not coil3.pulse.called self.assertTrue(hold_logic.is_full()) # self.assertEqual(0, trough.available_balls) self.assertEqual(2, hold.available_balls) # hold captures a third random ball self.machine.switch_controller.process_switch("s_ball_switch_hold3", 1) self.advance_time_and_run(1) # it should eject it right away self.assertTrue(coil3.pulse.called) coil3.pulse = MagicMock() self.assertTrue(hold_logic.is_full()) self.advance_time_and_run(1) self.machine.switch_controller.process_switch("s_ball_switch_hold3", 0) self.advance_time_and_run(11) self.assertTrue(hold_logic.is_full()) self.assertEqual(2, hold.available_balls) hold_logic.release_all() self.advance_time_and_run(1) self.assertEqual(0, hold.available_balls) self.assertTrue(coil3.pulse.called) coil3.pulse = MagicMock() self.advance_time_and_run(1) self.machine.switch_controller.process_switch("s_ball_switch_hold2", 0) self.advance_time_and_run(1) self.assertFalse(hold_logic.is_full()) self.advance_time_and_run(11) self.assertTrue(coil3.pulse.called) coil3.pulse = MagicMock() self.advance_time_and_run(1) self.machine.switch_controller.process_switch("s_ball_switch_hold1", 0) self.advance_time_and_run(11) assert not coil3.pulse.called def test_eject_to_hold(self): coil1 = self.machine.coils['eject_coil1'] coil2 = self.machine.coils['eject_coil2'] coil3 = self.machine.coils['eject_coil3'] trough = self.machine.ball_devices['test_trough'] launcher = self.machine.ball_devices['test_launcher'] hold = self.machine.ball_devices['test_hold'] hold_logic = self.machine.ball_holds['hold_test'] playfield = self.machine.ball_devices['playfield'] self.machine.events.add_handler('balldevice_captured_from_playfield', self._captured_from_pf) self.machine.events.add_handler('balldevice_ball_missing', self._missing_ball) self.machine.events.add_handler('collecting_balls_complete', self._collecting_balls_complete_handler) self._enter = 0 self._captured = 0 self._missing = 0 
self._collecting_balls_complete = 0 # add an initial ball to trough self.machine.switch_controller.process_switch("s_ball_switch1", 1) self.machine.switch_controller.process_switch("s_ball_switch2", 1) self.advance_time_and_run(1) self.assertEqual(2, self._captured) self._captured = 0 self.assertEqual(0, playfield.balls) self.assertEqual(2, self.machine.ball_controller.num_balls_known) # it should keep the ball coil1.pulse = MagicMock() coil2.pulse = MagicMock() coil3.pulse = MagicMock() self.assertEqual(2, trough.balls) assert not coil1.pulse.called assert not coil2.pulse.called assert not coil3.pulse.called # start a game self.machine.switch_controller.process_switch("s_start", 1) self.advance_time_and_run(0.1) self.machine.switch_controller.process_switch("s_start", 0) self.advance_time_and_run(1) # trough ejects self.assertTrue(coil1.pulse.called) assert not coil2.pulse.called assert not coil3.pulse.called self.machine.switch_controller.process_switch("s_ball_switch1", 0) self.advance_time_and_run(1) self.assertEqual(1, trough.balls) # launcher receives and ejects self.machine.switch_controller.process_switch("s_ball_switch_launcher", 1) self.advance_time_and_run(1) self.assertEqual(1, launcher.balls) self.assertTrue(coil1.pulse.called) self.assertTrue(coil2.pulse.called) assert not coil3.pulse.called # launcher shoots the ball self.machine.switch_controller.process_switch("s_ball_switch_launcher", 0) self.advance_time_and_run(1) self.assertEqual(0, launcher.balls) coil1.pulse = MagicMock() coil2.pulse = MagicMock() coil3.pulse = MagicMock() # ball directly enters the hold self.machine.switch_controller.process_switch("s_ball_switch_hold1", 1) self.advance_time_and_run(1) self.assertEqual(1, hold.balls) # request another ball self.machine.playfield.add_ball(1) self.advance_time_and_run() self.assertTrue(coil1.pulse.called) assert not coil2.pulse.called assert not coil3.pulse.called self.assertEqual(0, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self.assertEqual(1, hold_logic.balls_held) self._captured = 0 self.machine.switch_controller.process_switch("s_ball_switch2", 0) self.advance_time_and_run(1) self.assertEqual(0, trough.balls) # launcher receives and ejects self.machine.switch_controller.process_switch("s_ball_switch_launcher", 1) self.advance_time_and_run(1) self.assertEqual(1, launcher.balls) self.assertTrue(coil1.pulse.called) self.assertTrue(coil2.pulse.called) assert not coil3.pulse.called # launcher shoots the ball self.machine.switch_controller.process_switch("s_ball_switch_launcher", 0) self.advance_time_and_run(1) self.assertEqual(0, launcher.balls) self.machine.switch_controller.process_switch("s_playfield_active", 1) self.advance_time_and_run(0.1) self.machine.switch_controller.process_switch("s_playfield_active", 0) self.advance_time_and_run(1) self.assertEqual(1, playfield.balls) self.assertEqual(0, self._captured) self.assertEqual(0, self._missing) self.assertEqual(1, hold.available_balls) # request a release of one ball from hold via event self.machine.events.post("release_test") # since we are not using a multi ball increase the balls_in_play manually self.assertEqual(1, self.machine.game.balls_in_play) self.machine.game.balls_in_play += 1 self.advance_time_and_run(1) # hold should eject a ball self.assertTrue(coil1.pulse.called) self.assertTrue(coil2.pulse.called) self.assertTrue(coil3.pulse.called) self.machine.switch_controller.process_switch("s_ball_switch_hold1", 0) self.advance_time_and_run(1) self.assertEqual(0, 
hold.balls) self.assertEqual(0, hold_logic.balls_held) self.assertEqual(0, self._captured) self.assertEqual(0, self._missing) # ball drains instantly. one left on pf self.machine.switch_controller.process_switch("s_ball_switch1", 1) self.advance_time_and_run(1) self.assertEqual(1, self.machine.game.balls_in_play) # other ball hits some pf switches self.machine.switch_controller.process_switch("s_playfield_active", 1) self.advance_time_and_run(0.1) self.machine.switch_controller.process_switch("s_playfield_active", 0) self.advance_time_and_run(1) self.assertEqual(1, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self._captured = 0 self.assertEqual(2, self.machine.ball_controller.num_balls_known) self.assertEqual(0, self._collecting_balls_complete) self.assertEqual(1, self.machine.game.balls_in_play) coil1.pulse = MagicMock() coil2.pulse = MagicMock() coil3.pulse = MagicMock() # we add another ball self.machine.game.balls_in_play += 1 playfield.add_ball() self.advance_time_and_run(1) self.assertEqual(2, self.machine.game.balls_in_play) self.machine.switch_controller.process_switch("s_ball_switch1", 0) self.advance_time_and_run(1) self.assertEqual(0, trough.balls) # launcher receives and ejects self.machine.switch_controller.process_switch("s_ball_switch_launcher", 1) self.advance_time_and_run(1) self.assertEqual(1, launcher.balls) self.assertTrue(coil1.pulse.called) self.assertTrue(coil2.pulse.called) assert not coil3.pulse.called # launcher shoots the ball self.machine.switch_controller.process_switch("s_ball_switch_launcher", 0) self.advance_time_and_run(1) self.assertEqual(0, launcher.balls) coil1.pulse = MagicMock() coil2.pulse = MagicMock() coil3.pulse = MagicMock() # ball directly enters the hold (again) self.machine.switch_controller.process_switch("s_ball_switch_hold1", 1) self.advance_time_and_run(1) self.assertEqual(1, hold.balls) # playfield count goes to 0 self.assertEqual(0, playfield.balls) self.assertEqual(1, self._captured) self.assertEqual(0, self._missing) self._captured = 0 # wait for eject confirm for hold self.advance_time_and_run(10) self.assertEqual(2, self.machine.ball_controller.num_balls_known) self.advance_time_and_run(100) self.assertEqual(1, playfield.balls) self.assertEqual(0, self._captured) self.assertEqual(0, self._missing) self.assertEqual(2, self.machine.ball_controller.num_balls_known) def test_auto_capacity(self): self.assertEqual(self.machine.ball_holds["hold_test3"].config['balls_to_hold'], 2) def test_enabled_state_in_placeholder(self): placeholder = self.machine.placeholder_manager.build_bool_template("device.ball_holds.hold_test.enabled") value, subscription = placeholder.evaluate_and_subscribe([]) self.assertFalse(value) self.assertFalse(subscription.done()) self.mock_event("should_post_when_enabled") self.mock_event("should_post_when_disabled") self.mock_event("should_not_post_when_enabled") self.mock_event("should_not_post_when_disabled") hold = self.machine.ball_holds['hold_test'] hold.enable() self.assertTrue(hold.enabled) self.post_event("test_event_when_enabled") self.assertEventCalled("should_post_when_enabled") self.assertEventNotCalled("should_not_post_when_enabled") self.assertTrue(subscription.done()) value, subscription = placeholder.evaluate_and_subscribe([]) self.assertTrue(value) hold.disable() self.assertFalse(hold.enabled) self.post_event("test_event_when_disabled") self.assertEventCalled("should_post_when_disabled") self.assertEventNotCalled("should_not_post_when_disabled") 
        self.assertTrue(subscription.done())

        value, subscription = placeholder.evaluate_and_subscribe([])
        self.assertFalse(value)

        # we need to cancel the subscription or it will crash later
        subscription.cancel()


class TestBallHoldSmart(MpfTestCase):

    def get_config_file(self):
        return 'test_ball_holds.yaml'

    def get_machine_path(self):
        return 'tests/machine_files/ball_holds/'

    def get_platform(self):
        return 'smart_virtual'

    def test_ball_end(self):
        self.machine.config['game']['balls_per_game'] = self.machine.placeholder_manager.build_int_template(3)

        # add an initial ball to trough
        self.hit_switch_and_run("s_ball_switch1", 1)
        self.hit_switch_and_run("s_ball_switch2", 1)
        self.advance_time_and_run(1)
        self.assertEqual(0, self.machine.playfield.balls)
        self.assertEqual(2, self.machine.ball_controller.num_balls_known)

        # press start
        self.hit_and_release_switch("s_start")

        # wait until ball is on pf
        self.advance_time_and_run(10)
        self.assertEqual(1, self.machine.game.player.ball)
        self.assertEqual(1, self.machine.playfield.balls)

        # hold one ball
        self.machine.ball_holds["hold_test"].enable()
        self.hit_switch_and_run("s_ball_switch_hold1", 1)

        # still held
        self.advance_time_and_run(10)
        self.assertEqual(1, self.machine.ball_holds["hold_test"].balls_held)
        self.assertEqual(0, self.machine.playfield.balls)

        # request another ball
        self.machine.playfield.add_ball(1)
        self.advance_time_and_run()

        self.hit_and_release_switch('s_playfield_active')
        self.advance_time_and_run()
        self.assertEqual(1, self.machine.playfield.balls)

        # drain ball on pf
        self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["test_trough"])
        self.advance_time_and_run(1)

        # machine waits for the hold
        self.advance_time_and_run(10)
        self.assertEqual(1, self.machine.game.player.ball)

        # release the hold
        self.post_event('release_test')
        self.assertEqual(0, self.machine.ball_holds["hold_test"].balls_held)
        self.assertEqual(1, self.machine.playfield.balls)

        # once the ball drains move on to ball 2
        self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["test_trough"])
        self.advance_time_and_run(1)
        self.assertEqual(2, self.machine.game.player.ball)

        # next ball
        self.advance_time_and_run(10)
        self.assertBallsOnPlayfield(1)

        # drain again
        self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["test_trough"])
        self.advance_time_and_run(1)

        # there should be no hold this time
        self.assertEqual(3, self.machine.game.player.ball)
src/osmo/config_osmo.py
jumperavocado/staketaxcsv
140
133252
<filename>src/osmo/config_osmo.py
from common import ExporterTypes as et
from common.config import config


class localconfig(config):

    limit = 30000  # max txs

    # Treat LP deposits/withdrawals as "transfers"/"omit"/"trades" (ignored for koinly)
    lp_treatment = et.LP_TREATMENT_DEFAULT

    ibc_addresses = {}
    exponents = {}
stix/extensions/test_mechanism/open_ioc_2010_test_mechanism.py
saegel/python-stix
194
133259
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

# external
from lxml import etree
import mixbox.xml
from mixbox.fields import TypedField
from mixbox.vendor.six import BytesIO, iteritems

# internal
import stix
from stix.indicator.test_mechanism import _BaseTestMechanism
import stix.bindings.extensions.test_mechanism.open_ioc_2010 as open_ioc_tm_binding


@stix.register_extension
class OpenIOCTestMechanism(_BaseTestMechanism):
    _namespace = "http://stix.mitre.org/extensions/TestMechanism#OpenIOC2010-1"
    _binding = open_ioc_tm_binding
    _binding_class = _binding.OpenIOC2010TestMechanismType
    _xml_ns_prefix = "stix-openioc"
    _XSI_TYPE = "stix-openioc:OpenIOC2010TestMechanismType"
    _TAG_IOC = "{%s}ioc" % _namespace

    ioc = TypedField("ioc")

    def __init__(self, id_=None, idref=None):
        super(OpenIOCTestMechanism, self).__init__(id_=id_, idref=idref)
        self.ioc = None
        self.__input_namespaces__ = {}
        self.__input_schemalocations__ = {}

    def _collect_schemalocs(self, node):
        try:
            schemaloc = mixbox.xml.get_schemaloc_pairs(node)
            self.__input_schemalocations__ = dict(schemaloc)
        except KeyError:
            self.__input_schemalocations__ = {}

    def _collect_namespaces(self, node):
        self.__input_namespaces__ = dict(iteritems(node.nsmap))

    def _cast_ioc(self, node):
        ns_ioc = "http://schemas.mandiant.com/2010/ioc"
        node_ns = etree.QName(node).namespace

        if node_ns == ns_ioc:
            etree.register_namespace(self._xml_ns_prefix, self._namespace)
            node.tag = self._TAG_IOC
        else:
            error = "Cannot set ioc. Expected tag '{0}' found '{1}'."
            error = error.format(self._TAG_IOC, node.tag)
            raise ValueError(error)

    def _processed_ioc(self):
        if self.ioc is None:
            return None

        tree = mixbox.xml.get_etree(self.ioc)
        root = mixbox.xml.get_etree_root(tree)

        if root.tag != self._TAG_IOC:
            self._cast_ioc(root)

        self._collect_namespaces(root)
        self._collect_schemalocs(root)
        return tree

    @classmethod
    def from_obj(cls, obj):
        if not obj:
            return None

        return_obj = super(OpenIOCTestMechanism, cls).from_obj(obj)
        return_obj.ioc = obj.ioc
        return return_obj

    def to_obj(self, return_obj=None, ns_info=None):
        if not return_obj:
            return_obj = self._binding_class()

        super(OpenIOCTestMechanism, self).to_obj(ns_info=ns_info)
        return_obj.ioc = self._processed_ioc()
        return return_obj

    @classmethod
    def from_dict(cls, d):
        if not d:
            return None

        return_obj = super(OpenIOCTestMechanism, cls).from_dict(d)

        if 'ioc' in d:
            parser = mixbox.xml.get_xml_parser()
            return_obj.ioc = etree.parse(BytesIO(d['ioc']), parser=parser)

        return return_obj

    def to_dict(self):
        d = super(OpenIOCTestMechanism, self).to_dict()

        if self.ioc:
            d['ioc'] = etree.tostring(self._processed_ioc())

        return d
h/services/search_index/__init__.py
pombredanne/h
2,103
133266
<reponame>pombredanne/h<filename>h/services/search_index/__init__.py
from h.services.search_index.service import SearchIndexService
from h.services.search_index.service_factory import factory
lib/mirror/cnn_wrapper/resnet.py
Yzhbuaa/DAGSfM
255
133274
<reponame>Yzhbuaa/DAGSfM from cnn_wrapper.network import Network class ResNet50(Network): def setup(self): (self.feed('data') .conv(7, 64, 2, padding=3, relu=False, name='conv1') .batch_normalization(relu=True, scale=True, center=True, name='bn_conv1') .max_pool(3, 2, name='pool1') .conv(1, 256, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(scale=True, center=True, name='bn2a_branch1')) (self.feed('pool1') .conv(1, 64, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(scale=True, center=True, name='bn2a_branch2c')) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 64, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(scale=True, center=True, name='bn2b_branch2c')) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 64, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(scale=True, center=True, name='bn2c_branch2c')) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 512, 2, biased=False, relu=False, name='res3a_branch1') .batch_normalization(scale=True, center=True, name='bn3a_branch1')) (self.feed('res2c_relu') .conv(1, 128, 2, biased=False, relu=False, name='res3a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(scale=True, center=True, name='bn3a_branch2c')) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b_branch2c') .batch_normalization(scale=True, center=True, name='bn3b_branch2c')) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3c_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3c_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(scale=True, center=True, 
name='bn3c_branch2c')) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3d_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3d_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3d_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(scale=True, center=True, name='bn3d_branch2c')) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1024, 2, biased=False, relu=False, name='res4a_branch1') .batch_normalization(scale=True, center=True, name='bn4a_branch1')) (self.feed('res3d_relu') .conv(1, 256, 2, biased=False, relu=False, name='res4a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(scale=True, center=True, name='bn4a_branch2c')) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(scale=True, center=True, name='bn4b_branch2c')) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4c_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4c_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(scale=True, center=True, name='bn4c_branch2c')) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4d_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4d_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(scale=True, center=True, name='bn4d_branch2c')) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4e_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4e_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(scale=True, center=True, name='bn4e_branch2c')) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4f_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4f_branch2b') 
.batch_normalization(scale=True, center=True, relu=True, name='bn4f_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4f_branch2c') .batch_normalization(scale=True, center=True, name='bn4f_branch2c')) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu') .conv(1, 2048, 2, biased=False, relu=False, name='res5a_branch1') .batch_normalization(scale=True, center=True, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 512, 2, biased=False, relu=False, name='res5a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5a_branch2c') .batch_normalization(scale=True, center=True, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 512, 1, biased=False, relu=False, name='res5b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5b_branch2c') .batch_normalization(scale=True, center=True, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 512, 1, biased=False, relu=False, name='res5c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5c_branch2c') .batch_normalization(scale=True, center=True, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .avg_pool(7, 1, padding='VALID', name='pool5')) if not self.fcn: (self.feed('pool5') .fc(1000, relu=False, name='fc1000') .softmax(name='prob')) class ResNet101(Network): def setup(self): (self.feed('data') .conv(7, 64, 2, biased=False, relu=False, name='conv1') .batch_normalization(scale=True, center=True, relu=True, name='bn_conv1') .max_pool(3, 2, name='pool1') .conv(1, 256, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(scale=True, center=True, name='bn2a_branch1')) (self.feed('pool1') .conv(1, 64, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(scale=True, center=True, name='bn2a_branch2c')) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 64, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(scale=True, center=True, name='bn2b_branch2c')) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 64, 1, 
biased=False, relu=False, name='res2c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(scale=True, center=True, name='bn2c_branch2c')) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 512, 2, biased=False, relu=False, name='res3a_branch1') .batch_normalization(scale=True, center=True, name='bn3a_branch1')) (self.feed('res2c_relu') .conv(1, 128, 2, biased=False, relu=False, name='res3a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(scale=True, center=True, name='bn3a_branch2c')) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b1_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b1_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b1_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b1_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b1_branch2c') .batch_normalization(scale=True, center=True, name='bn3b1_branch2c')) (self.feed('res3a_relu', 'bn3b1_branch2c') .add(name='res3b1') .relu(name='res3b1_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b2_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b2_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b2_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b2_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b2_branch2c') .batch_normalization(scale=True, center=True, name='bn3b2_branch2c')) (self.feed('res3b1_relu', 'bn3b2_branch2c') .add(name='res3b2') .relu(name='res3b2_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b3_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b3_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b3_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b3_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b3_branch2c') .batch_normalization(scale=True, center=True, name='bn3b3_branch2c')) (self.feed('res3b2_relu', 'bn3b3_branch2c') .add(name='res3b3') .relu(name='res3b3_relu') .conv(1, 1024, 2, biased=False, relu=False, name='res4a_branch1') .batch_normalization(scale=True, center=True, name='bn4a_branch1')) (self.feed('res3b3_relu') .conv(1, 256, 2, biased=False, relu=False, name='res4a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(scale=True, center=True, name='bn4a_branch2c')) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b1_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b1_branch2a') 
.conv(3, 256, 1, biased=False, relu=False, name='res4b1_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b1_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b1_branch2c') .batch_normalization(scale=True, center=True, name='bn4b1_branch2c')) (self.feed('res4a_relu', 'bn4b1_branch2c') .add(name='res4b1') .relu(name='res4b1_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b2_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b2_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b2_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b2_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b2_branch2c') .batch_normalization(scale=True, center=True, name='bn4b2_branch2c')) (self.feed('res4b1_relu', 'bn4b2_branch2c') .add(name='res4b2') .relu(name='res4b2_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b3_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b3_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b3_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b3_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b3_branch2c') .batch_normalization(scale=True, center=True, name='bn4b3_branch2c')) (self.feed('res4b2_relu', 'bn4b3_branch2c') .add(name='res4b3') .relu(name='res4b3_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b4_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b4_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b4_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b4_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b4_branch2c') .batch_normalization(scale=True, center=True, name='bn4b4_branch2c')) (self.feed('res4b3_relu', 'bn4b4_branch2c') .add(name='res4b4') .relu(name='res4b4_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b5_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b5_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b5_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b5_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b5_branch2c') .batch_normalization(scale=True, center=True, name='bn4b5_branch2c')) (self.feed('res4b4_relu', 'bn4b5_branch2c') .add(name='res4b5') .relu(name='res4b5_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b6_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b6_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b6_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b6_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b6_branch2c') .batch_normalization(scale=True, center=True, name='bn4b6_branch2c')) (self.feed('res4b5_relu', 'bn4b6_branch2c') .add(name='res4b6') .relu(name='res4b6_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b7_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b7_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b7_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b7_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b7_branch2c') .batch_normalization(scale=True, center=True, name='bn4b7_branch2c')) (self.feed('res4b6_relu', 'bn4b7_branch2c') .add(name='res4b7') .relu(name='res4b7_relu') .conv(1, 256, 1, biased=False, relu=False, 
name='res4b8_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b8_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b8_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b8_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b8_branch2c') .batch_normalization(scale=True, center=True, name='bn4b8_branch2c')) (self.feed('res4b7_relu', 'bn4b8_branch2c') .add(name='res4b8') .relu(name='res4b8_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b9_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b9_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b9_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b9_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b9_branch2c') .batch_normalization(scale=True, center=True, name='bn4b9_branch2c')) (self.feed('res4b8_relu', 'bn4b9_branch2c') .add(name='res4b9') .relu(name='res4b9_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b10_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b10_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b10_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b10_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b10_branch2c') .batch_normalization(scale=True, center=True, name='bn4b10_branch2c')) (self.feed('res4b9_relu', 'bn4b10_branch2c') .add(name='res4b10') .relu(name='res4b10_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b11_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b11_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b11_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b11_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b11_branch2c') .batch_normalization(scale=True, center=True, name='bn4b11_branch2c')) (self.feed('res4b10_relu', 'bn4b11_branch2c') .add(name='res4b11') .relu(name='res4b11_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b12_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b12_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b12_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b12_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b12_branch2c') .batch_normalization(scale=True, center=True, name='bn4b12_branch2c')) (self.feed('res4b11_relu', 'bn4b12_branch2c') .add(name='res4b12') .relu(name='res4b12_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b13_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b13_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b13_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b13_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b13_branch2c') .batch_normalization(scale=True, center=True, name='bn4b13_branch2c')) (self.feed('res4b12_relu', 'bn4b13_branch2c') .add(name='res4b13') .relu(name='res4b13_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b14_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b14_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b14_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b14_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b14_branch2c') .batch_normalization(scale=True, center=True, 
name='bn4b14_branch2c')) (self.feed('res4b13_relu', 'bn4b14_branch2c') .add(name='res4b14') .relu(name='res4b14_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b15_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b15_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b15_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b15_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b15_branch2c') .batch_normalization(scale=True, center=True, name='bn4b15_branch2c')) (self.feed('res4b14_relu', 'bn4b15_branch2c') .add(name='res4b15') .relu(name='res4b15_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b16_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b16_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b16_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b16_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b16_branch2c') .batch_normalization(scale=True, center=True, name='bn4b16_branch2c')) (self.feed('res4b15_relu', 'bn4b16_branch2c') .add(name='res4b16') .relu(name='res4b16_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b17_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b17_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b17_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b17_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b17_branch2c') .batch_normalization(scale=True, center=True, name='bn4b17_branch2c')) (self.feed('res4b16_relu', 'bn4b17_branch2c') .add(name='res4b17') .relu(name='res4b17_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b18_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b18_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b18_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b18_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b18_branch2c') .batch_normalization(scale=True, center=True, name='bn4b18_branch2c')) (self.feed('res4b17_relu', 'bn4b18_branch2c') .add(name='res4b18') .relu(name='res4b18_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b19_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b19_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b19_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b19_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b19_branch2c') .batch_normalization(scale=True, center=True, name='bn4b19_branch2c')) (self.feed('res4b18_relu', 'bn4b19_branch2c') .add(name='res4b19') .relu(name='res4b19_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b20_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b20_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b20_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b20_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b20_branch2c') .batch_normalization(scale=True, center=True, name='bn4b20_branch2c')) (self.feed('res4b19_relu', 'bn4b20_branch2c') .add(name='res4b20') .relu(name='res4b20_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b21_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b21_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b21_branch2b') 
.batch_normalization(scale=True, center=True, relu=True, name='bn4b21_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b21_branch2c') .batch_normalization(scale=True, center=True, name='bn4b21_branch2c')) (self.feed('res4b20_relu', 'bn4b21_branch2c') .add(name='res4b21') .relu(name='res4b21_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b22_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b22_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b22_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b22_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b22_branch2c') .batch_normalization(scale=True, center=True, name='bn4b22_branch2c')) (self.feed('res4b21_relu', 'bn4b22_branch2c') .add(name='res4b22') .relu(name='res4b22_relu') .conv(1, 2048, 2, biased=False, relu=False, name='res5a_branch1') .batch_normalization(scale=True, center=True, name='bn5a_branch1')) (self.feed('res4b22_relu') .conv(1, 512, 2, biased=False, relu=False, name='res5a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5a_branch2c') .batch_normalization(scale=True, center=True, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 512, 1, biased=False, relu=False, name='res5b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5b_branch2c') .batch_normalization(scale=True, center=True, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 512, 1, biased=False, relu=False, name='res5c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5c_branch2c') .batch_normalization(scale=True, center=True, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .avg_pool(7, 1, padding='VALID', name='pool5')) if not self.fcn: (self.feed('pool5') .fc(1000, relu=False, name='fc1000') .softmax(name='prob')) class ResNet152(Network): def setup(self): (self.feed('data') .conv(7, 64, 2, biased=False, relu=False, name='conv1') .batch_normalization(scale=True, center=True, relu=True, name='bn_conv1') .max_pool(3, 2, name='pool1') .conv(1, 256, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(scale=True, center=True, name='bn2a_branch1')) (self.feed('pool1') .conv(1, 64, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2a_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(scale=True, center=True, name='bn2a_branch2c')) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') 
.relu(name='res2a_relu') .conv(1, 64, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2b_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(scale=True, center=True, name='bn2b_branch2c')) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 64, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2a') .conv(3, 64, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn2c_branch2b') .conv(1, 256, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(scale=True, center=True, name='bn2c_branch2c')) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 512, 2, biased=False, relu=False, name='res3a_branch1') .batch_normalization(scale=True, center=True, name='bn3a_branch1')) (self.feed('res2c_relu') .conv(1, 128, 2, biased=False, relu=False, name='res3a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3a_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(scale=True, center=True, name='bn3a_branch2c')) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b1_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b1_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b1_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b1_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b1_branch2c') .batch_normalization(scale=True, center=True, name='bn3b1_branch2c')) (self.feed('res3a_relu', 'bn3b1_branch2c') .add(name='res3b1') .relu(name='res3b1_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b2_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b2_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b2_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b2_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b2_branch2c') .batch_normalization(scale=True, center=True, name='bn3b2_branch2c')) (self.feed('res3b1_relu', 'bn3b2_branch2c') .add(name='res3b2') .relu(name='res3b2_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b3_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b3_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b3_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b3_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b3_branch2c') .batch_normalization(scale=True, center=True, name='bn3b3_branch2c')) (self.feed('res3b2_relu', 'bn3b3_branch2c') .add(name='res3b3') .relu(name='res3b3_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b4_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b4_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b4_branch2b') .batch_normalization(scale=True, center=True, relu=True, 
name='bn3b4_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b4_branch2c') .batch_normalization(scale=True, center=True, name='bn3b4_branch2c')) (self.feed('res3b3_relu', 'bn3b4_branch2c') .add(name='res3b4') .relu(name='res3b4_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b5_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b5_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b5_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b5_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b5_branch2c') .batch_normalization(scale=True, center=True, name='bn3b5_branch2c')) (self.feed('res3b4_relu', 'bn3b5_branch2c') .add(name='res3b5') .relu(name='res3b5_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b6_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b6_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b6_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b6_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b6_branch2c') .batch_normalization(scale=True, center=True, name='bn3b6_branch2c')) (self.feed('res3b5_relu', 'bn3b6_branch2c') .add(name='res3b6') .relu(name='res3b6_relu') .conv(1, 128, 1, biased=False, relu=False, name='res3b7_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn3b7_branch2a') .conv(3, 128, 1, biased=False, relu=False, name='res3b7_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn3b7_branch2b') .conv(1, 512, 1, biased=False, relu=False, name='res3b7_branch2c') .batch_normalization(scale=True, center=True, name='bn3b7_branch2c')) (self.feed('res3b6_relu', 'bn3b7_branch2c') .add(name='res3b7') .relu(name='res3b7_relu') .conv(1, 1024, 2, biased=False, relu=False, name='res4a_branch1') .batch_normalization(scale=True, center=True, name='bn4a_branch1')) (self.feed('res3b7_relu') .conv(1, 256, 2, biased=False, relu=False, name='res4a_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4a_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(scale=True, center=True, name='bn4a_branch2c')) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b1_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b1_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b1_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b1_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b1_branch2c') .batch_normalization(scale=True, center=True, name='bn4b1_branch2c')) (self.feed('res4a_relu', 'bn4b1_branch2c') .add(name='res4b1') .relu(name='res4b1_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b2_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b2_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b2_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b2_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b2_branch2c') .batch_normalization(scale=True, center=True, name='bn4b2_branch2c')) (self.feed('res4b1_relu', 'bn4b2_branch2c') .add(name='res4b2') .relu(name='res4b2_relu') .conv(1, 256, 1, 
biased=False, relu=False, name='res4b3_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b3_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b3_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b3_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b3_branch2c') .batch_normalization(scale=True, center=True, name='bn4b3_branch2c')) (self.feed('res4b2_relu', 'bn4b3_branch2c') .add(name='res4b3') .relu(name='res4b3_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b4_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b4_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b4_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b4_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b4_branch2c') .batch_normalization(scale=True, center=True, name='bn4b4_branch2c')) (self.feed('res4b3_relu', 'bn4b4_branch2c') .add(name='res4b4') .relu(name='res4b4_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b5_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b5_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b5_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b5_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b5_branch2c') .batch_normalization(scale=True, center=True, name='bn4b5_branch2c')) (self.feed('res4b4_relu', 'bn4b5_branch2c') .add(name='res4b5') .relu(name='res4b5_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b6_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b6_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b6_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b6_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b6_branch2c') .batch_normalization(scale=True, center=True, name='bn4b6_branch2c')) (self.feed('res4b5_relu', 'bn4b6_branch2c') .add(name='res4b6') .relu(name='res4b6_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b7_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b7_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b7_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b7_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b7_branch2c') .batch_normalization(scale=True, center=True, name='bn4b7_branch2c')) (self.feed('res4b6_relu', 'bn4b7_branch2c') .add(name='res4b7') .relu(name='res4b7_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b8_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b8_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b8_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b8_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b8_branch2c') .batch_normalization(scale=True, center=True, name='bn4b8_branch2c')) (self.feed('res4b7_relu', 'bn4b8_branch2c') .add(name='res4b8') .relu(name='res4b8_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b9_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b9_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b9_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b9_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b9_branch2c') .batch_normalization(scale=True, center=True, name='bn4b9_branch2c')) 
(self.feed('res4b8_relu', 'bn4b9_branch2c') .add(name='res4b9') .relu(name='res4b9_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b10_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b10_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b10_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b10_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b10_branch2c') .batch_normalization(scale=True, center=True, name='bn4b10_branch2c')) (self.feed('res4b9_relu', 'bn4b10_branch2c') .add(name='res4b10') .relu(name='res4b10_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b11_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b11_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b11_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b11_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b11_branch2c') .batch_normalization(scale=True, center=True, name='bn4b11_branch2c')) (self.feed('res4b10_relu', 'bn4b11_branch2c') .add(name='res4b11') .relu(name='res4b11_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b12_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b12_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b12_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b12_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b12_branch2c') .batch_normalization(scale=True, center=True, name='bn4b12_branch2c')) (self.feed('res4b11_relu', 'bn4b12_branch2c') .add(name='res4b12') .relu(name='res4b12_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b13_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b13_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b13_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b13_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b13_branch2c') .batch_normalization(scale=True, center=True, name='bn4b13_branch2c')) (self.feed('res4b12_relu', 'bn4b13_branch2c') .add(name='res4b13') .relu(name='res4b13_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b14_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b14_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b14_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b14_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b14_branch2c') .batch_normalization(scale=True, center=True, name='bn4b14_branch2c')) (self.feed('res4b13_relu', 'bn4b14_branch2c') .add(name='res4b14') .relu(name='res4b14_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b15_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b15_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b15_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b15_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b15_branch2c') .batch_normalization(scale=True, center=True, name='bn4b15_branch2c')) (self.feed('res4b14_relu', 'bn4b15_branch2c') .add(name='res4b15') .relu(name='res4b15_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b16_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b16_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b16_branch2b') .batch_normalization(scale=True, center=True, relu=True, 
name='bn4b16_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b16_branch2c') .batch_normalization(scale=True, center=True, name='bn4b16_branch2c')) (self.feed('res4b15_relu', 'bn4b16_branch2c') .add(name='res4b16') .relu(name='res4b16_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b17_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b17_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b17_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b17_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b17_branch2c') .batch_normalization(scale=True, center=True, name='bn4b17_branch2c')) (self.feed('res4b16_relu', 'bn4b17_branch2c') .add(name='res4b17') .relu(name='res4b17_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b18_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b18_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b18_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b18_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b18_branch2c') .batch_normalization(scale=True, center=True, name='bn4b18_branch2c')) (self.feed('res4b17_relu', 'bn4b18_branch2c') .add(name='res4b18') .relu(name='res4b18_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b19_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b19_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b19_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b19_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b19_branch2c') .batch_normalization(scale=True, center=True, name='bn4b19_branch2c')) (self.feed('res4b18_relu', 'bn4b19_branch2c') .add(name='res4b19') .relu(name='res4b19_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b20_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b20_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b20_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b20_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b20_branch2c') .batch_normalization(scale=True, center=True, name='bn4b20_branch2c')) (self.feed('res4b19_relu', 'bn4b20_branch2c') .add(name='res4b20') .relu(name='res4b20_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b21_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b21_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b21_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b21_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b21_branch2c') .batch_normalization(scale=True, center=True, name='bn4b21_branch2c')) (self.feed('res4b20_relu', 'bn4b21_branch2c') .add(name='res4b21') .relu(name='res4b21_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b22_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b22_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b22_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b22_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b22_branch2c') .batch_normalization(scale=True, center=True, name='bn4b22_branch2c')) (self.feed('res4b21_relu', 'bn4b22_branch2c') .add(name='res4b22') .relu(name='res4b22_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b23_branch2a') .batch_normalization(scale=True, 
center=True, relu=True, name='bn4b23_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b23_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b23_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b23_branch2c') .batch_normalization(scale=True, center=True, name='bn4b23_branch2c')) (self.feed('res4b22_relu', 'bn4b23_branch2c') .add(name='res4b23') .relu(name='res4b23_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b24_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b24_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b24_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b24_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b24_branch2c') .batch_normalization(scale=True, center=True, name='bn4b24_branch2c')) (self.feed('res4b23_relu', 'bn4b24_branch2c') .add(name='res4b24') .relu(name='res4b24_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b25_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b25_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b25_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b25_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b25_branch2c') .batch_normalization(scale=True, center=True, name='bn4b25_branch2c')) (self.feed('res4b24_relu', 'bn4b25_branch2c') .add(name='res4b25') .relu(name='res4b25_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b26_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b26_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b26_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b26_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b26_branch2c') .batch_normalization(scale=True, center=True, name='bn4b26_branch2c')) (self.feed('res4b25_relu', 'bn4b26_branch2c') .add(name='res4b26') .relu(name='res4b26_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b27_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b27_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b27_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b27_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b27_branch2c') .batch_normalization(scale=True, center=True, name='bn4b27_branch2c')) (self.feed('res4b26_relu', 'bn4b27_branch2c') .add(name='res4b27') .relu(name='res4b27_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b28_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b28_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b28_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b28_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b28_branch2c') .batch_normalization(scale=True, center=True, name='bn4b28_branch2c')) (self.feed('res4b27_relu', 'bn4b28_branch2c') .add(name='res4b28') .relu(name='res4b28_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b29_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b29_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b29_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b29_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b29_branch2c') .batch_normalization(scale=True, center=True, name='bn4b29_branch2c')) 
(self.feed('res4b28_relu', 'bn4b29_branch2c') .add(name='res4b29') .relu(name='res4b29_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b30_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b30_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b30_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b30_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b30_branch2c') .batch_normalization(scale=True, center=True, name='bn4b30_branch2c')) (self.feed('res4b29_relu', 'bn4b30_branch2c') .add(name='res4b30') .relu(name='res4b30_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b31_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b31_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b31_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b31_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b31_branch2c') .batch_normalization(scale=True, center=True, name='bn4b31_branch2c')) (self.feed('res4b30_relu', 'bn4b31_branch2c') .add(name='res4b31') .relu(name='res4b31_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b32_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b32_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b32_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b32_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b32_branch2c') .batch_normalization(scale=True, center=True, name='bn4b32_branch2c')) (self.feed('res4b31_relu', 'bn4b32_branch2c') .add(name='res4b32') .relu(name='res4b32_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b33_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b33_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b33_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b33_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b33_branch2c') .batch_normalization(scale=True, center=True, name='bn4b33_branch2c')) (self.feed('res4b32_relu', 'bn4b33_branch2c') .add(name='res4b33') .relu(name='res4b33_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b34_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b34_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b34_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b34_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b34_branch2c') .batch_normalization(scale=True, center=True, name='bn4b34_branch2c')) (self.feed('res4b33_relu', 'bn4b34_branch2c') .add(name='res4b34') .relu(name='res4b34_relu') .conv(1, 256, 1, biased=False, relu=False, name='res4b35_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn4b35_branch2a') .conv(3, 256, 1, biased=False, relu=False, name='res4b35_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn4b35_branch2b') .conv(1, 1024, 1, biased=False, relu=False, name='res4b35_branch2c') .batch_normalization(scale=True, center=True, name='bn4b35_branch2c')) (self.feed('res4b34_relu', 'bn4b35_branch2c') .add(name='res4b35') .relu(name='res4b35_relu') .conv(1, 2048, 2, biased=False, relu=False, name='res5a_branch1') .batch_normalization(scale=True, center=True, name='bn5a_branch1')) (self.feed('res4b35_relu') .conv(1, 512, 2, biased=False, relu=False, name='res5a_branch2a') .batch_normalization(scale=True, 
center=True, relu=True, name='bn5a_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5a_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5a_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5a_branch2c') .batch_normalization(scale=True, center=True, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 512, 1, biased=False, relu=False, name='res5b_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5b_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5b_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5b_branch2c') .batch_normalization(scale=True, center=True, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 512, 1, biased=False, relu=False, name='res5c_branch2a') .batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2a') .conv(3, 512, 1, biased=False, relu=False, name='res5c_branch2b') .batch_normalization(scale=True, center=True, relu=True, name='bn5c_branch2b') .conv(1, 2048, 1, biased=False, relu=False, name='res5c_branch2c') .batch_normalization(scale=True, center=True, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .avg_pool(7, 1, padding='VALID', name='pool5')) if not self.fcn: (self.feed('pool5') .fc(1000, relu=False, name='fc1000') .softmax(name='prob'))
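The chains above all use one fluent-builder idiom: every layer method registers an op under its name and returns the network object, while feed() selects, by name, the tensors the next op consumes (two names before add() give a residual shortcut its two summands). A minimal sketch of that pattern, with made-up internals rather than this file's real implementation:

class Network(object):
    def __init__(self):
        self.layers = {}     # name -> produced value (a tensor in the real code)
        self.terminals = []  # inputs for the next chained op

    def feed(self, *names):
        self.terminals = [self.layers[n] for n in names]
        return self

    def _register(self, name, value):
        self.layers[name] = value
        self.terminals = [value]
        return self

    def conv(self, k, out_ch, stride, name, biased=True, relu=True):
        # the real method would build a conv op from self.terminals[0]
        return self._register(name, ('conv', k, out_ch, stride, relu))

    def add(self, name):
        return self._register(name, ('add', tuple(self.terminals)))

    def relu(self, name):
        return self._register(name, ('relu', self.terminals[0]))

net = Network()
net.layers['a'] = net.layers['b'] = ('input',)
(net.feed('a', 'b')
 .add(name='c')
 .relu(name='c_relu'))  # mirrors the residual-merge chains above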
tests/components/plex/test_services.py
andersop91/core
22,481
133277
<filename>tests/components/plex/test_services.py<gh_stars>1000+ """Tests for various Plex services.""" from http import HTTPStatus from unittest.mock import patch from plexapi.exceptions import NotFound import pytest from homeassistant.components.media_player.const import MEDIA_TYPE_MUSIC from homeassistant.components.plex.const import ( CONF_SERVER, CONF_SERVER_IDENTIFIER, DOMAIN, PLEX_SERVER_CONFIG, SERVICE_REFRESH_LIBRARY, SERVICE_SCAN_CLIENTS, ) from homeassistant.components.plex.services import play_on_sonos from homeassistant.const import CONF_URL from homeassistant.exceptions import HomeAssistantError from .const import DEFAULT_OPTIONS, SECONDARY_DATA from tests.common import MockConfigEntry async def test_refresh_library( hass, mock_plex_server, setup_plex_server, requests_mock, empty_payload, plex_server_accounts, plex_server_base, ): """Test refresh_library service call.""" url = mock_plex_server.url_in_use refresh = requests_mock.get( f"{url}/library/sections/1/refresh", status_code=HTTPStatus.OK ) # Test with non-existent server with pytest.raises(HomeAssistantError): assert await hass.services.async_call( DOMAIN, SERVICE_REFRESH_LIBRARY, {"server_name": "Not a Server", "library_name": "Movies"}, True, ) assert not refresh.called # Test with non-existent library assert await hass.services.async_call( DOMAIN, SERVICE_REFRESH_LIBRARY, {"library_name": "Not a Library"}, True, ) assert not refresh.called # Test with valid library assert await hass.services.async_call( DOMAIN, SERVICE_REFRESH_LIBRARY, {"library_name": "Movies"}, True, ) assert refresh.call_count == 1 # Add a second configured server secondary_url = SECONDARY_DATA[PLEX_SERVER_CONFIG][CONF_URL] secondary_name = SECONDARY_DATA[CONF_SERVER] secondary_id = SECONDARY_DATA[CONF_SERVER_IDENTIFIER] requests_mock.get( secondary_url, text=plex_server_base.format( name=secondary_name, machine_identifier=secondary_id ), ) requests_mock.get(f"{secondary_url}/accounts", text=plex_server_accounts) requests_mock.get(f"{secondary_url}/clients", text=empty_payload) requests_mock.get(f"{secondary_url}/status/sessions", text=empty_payload) entry_2 = MockConfigEntry( domain=DOMAIN, data=SECONDARY_DATA, options=DEFAULT_OPTIONS, unique_id=SECONDARY_DATA["server_id"], ) await setup_plex_server(config_entry=entry_2) # Test multiple servers available but none specified with pytest.raises(HomeAssistantError) as excinfo: assert await hass.services.async_call( DOMAIN, SERVICE_REFRESH_LIBRARY, {"library_name": "Movies"}, True, ) assert "Multiple Plex servers configured" in str(excinfo.value) assert refresh.call_count == 1 async def test_scan_clients(hass, mock_plex_server): """Test scan_for_clients service call.""" assert await hass.services.async_call( DOMAIN, SERVICE_SCAN_CLIENTS, blocking=True, ) async def test_sonos_play_media( hass, entry, setup_plex_server, requests_mock, empty_payload, playqueue_1234, playqueue_created, plextv_account, sonos_resources, ): """Test playback from a Sonos media_player.play_media call.""" media_content_id = ( '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album"}' ) sonos_speaker_name = "Zone A" requests_mock.get("https://plex.tv/users/account", text=plextv_account) requests_mock.post("/playqueues", text=playqueue_created) playback_mock = requests_mock.get( "/player/playback/playMedia", status_code=HTTPStatus.OK ) # Test with no Plex integration available with pytest.raises(HomeAssistantError) as excinfo: play_on_sonos(hass, MEDIA_TYPE_MUSIC, media_content_id, sonos_speaker_name) assert 
"Plex integration not configured" in str(excinfo.value) with patch( "homeassistant.components.plex.PlexServer.connect", side_effect=NotFound ): # Initialize Plex integration without setting up a server with pytest.raises(AssertionError): await setup_plex_server() # Test with no Plex servers available with pytest.raises(HomeAssistantError) as excinfo: play_on_sonos(hass, MEDIA_TYPE_MUSIC, media_content_id, sonos_speaker_name) assert "No Plex servers available" in str(excinfo.value) # Complete setup of a Plex server await hass.config_entries.async_unload(entry.entry_id) mock_plex_server = await setup_plex_server() # Test with unlinked Plex/Sonos accounts requests_mock.get("https://sonos.plex.tv/resources", status_code=403) with pytest.raises(HomeAssistantError) as excinfo: play_on_sonos(hass, MEDIA_TYPE_MUSIC, media_content_id, sonos_speaker_name) assert "Sonos speakers not linked to Plex account" in str(excinfo.value) assert playback_mock.call_count == 0 # Test with no speakers available requests_mock.get("https://sonos.plex.tv/resources", text=empty_payload) with pytest.raises(HomeAssistantError) as excinfo: play_on_sonos(hass, MEDIA_TYPE_MUSIC, media_content_id, sonos_speaker_name) assert f"Sonos speaker '{sonos_speaker_name}' is not associated with" in str( excinfo.value ) assert playback_mock.call_count == 0 # Test with speakers available requests_mock.get("https://sonos.plex.tv/resources", text=sonos_resources) with patch.object(mock_plex_server.account, "_sonos_cache_timestamp", 0): play_on_sonos(hass, MEDIA_TYPE_MUSIC, media_content_id, sonos_speaker_name) assert playback_mock.call_count == 1 # Test with speakers available and media key payload play_on_sonos(hass, MEDIA_TYPE_MUSIC, "100", sonos_speaker_name) assert playback_mock.call_count == 2 # Test with speakers available and Plex server specified content_id_with_server = '{"plex_server": "Plex Server 1", "library_name": "Music", "artist_name": "Artist", "album_name": "Album"}' play_on_sonos(hass, MEDIA_TYPE_MUSIC, content_id_with_server, sonos_speaker_name) assert playback_mock.call_count == 3 # Test with speakers available but media not found content_id_bad_media = '{"library_name": "Music", "artist_name": "Not an Artist"}' with patch("plexapi.library.LibrarySection.search", return_value=None): with pytest.raises(HomeAssistantError) as excinfo: play_on_sonos( hass, MEDIA_TYPE_MUSIC, content_id_bad_media, sonos_speaker_name ) assert "Plex media not found" in str(excinfo.value) assert playback_mock.call_count == 3 # Test with speakers available and playqueue requests_mock.get("https://1.2.3.4:32400/playQueues/1234", text=playqueue_1234) content_id_with_playqueue = '{"playqueue_id": 1234}' play_on_sonos(hass, MEDIA_TYPE_MUSIC, content_id_with_playqueue, sonos_speaker_name) assert playback_mock.call_count == 4 # Test with speakers available and invalid playqueue requests_mock.get( "https://1.2.3.4:32400/playQueues/1235", status_code=HTTPStatus.NOT_FOUND ) content_id_with_playqueue = '{"playqueue_id": 1235}' with pytest.raises(HomeAssistantError) as excinfo: play_on_sonos( hass, MEDIA_TYPE_MUSIC, content_id_with_playqueue, sonos_speaker_name ) assert "PlayQueue '1235' could not be found" in str(excinfo.value) assert playback_mock.call_count == 4
exporter/SynthesisFusionGltfExporter/gltf/utils/PyUtils.py
Autodesk/synthesis
136
133292
<reponame>Autodesk/synthesis
from typing import *


def appendGetIndex(inList: List[Any], value: Any) -> int:
    """Append value to inList and return the index it was stored at."""
    # Note: the annotation must be typing.Any; the builtin any() is a
    # function, not a type.
    inList.append(value)
    return len(inList) - 1
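A short usage sketch for appendGetIndex; the accumulator name below is illustrative, not from the exporter. The helper exists because glTF-style builders constantly need the index of the element they just appended:

accessors = []  # hypothetical glTF accessor list
idx = appendGetIndex(accessors, {"componentType": 5126})
assert idx == 0
assert accessors[idx]["componentType"] == 5126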
data_structures/trie/trie.py
NavpreetDevpuri/Python
145,614
133303
""" A Trie/Prefix Tree is a kind of search tree used to provide quick lookup of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity making it impractical in practice. It however provides O(max(search_string, length of longest word)) lookup time making it an optimal approach when space is not an issue. """ class TrieNode: def __init__(self) -> None: self.nodes: dict[str, TrieNode] = dict() # Mapping from char to TrieNode self.is_leaf = False def insert_many(self, words: list[str]) -> None: """ Inserts a list of words into the Trie :param words: list of string words :return: None """ for word in words: self.insert(word) def insert(self, word: str) -> None: """ Inserts a word into the Trie :param word: word to be inserted :return: None """ curr = self for char in word: if char not in curr.nodes: curr.nodes[char] = TrieNode() curr = curr.nodes[char] curr.is_leaf = True def find(self, word: str) -> bool: """ Tries to find word in a Trie :param word: word to look for :return: Returns True if word is found, False otherwise """ curr = self for char in word: if char not in curr.nodes: return False curr = curr.nodes[char] return curr.is_leaf def delete(self, word: str) -> None: """ Deletes a word in a Trie :param word: word to delete :return: None """ def _delete(curr: TrieNode, word: str, index: int) -> bool: if index == len(word): # If word does not exist if not curr.is_leaf: return False curr.is_leaf = False return len(curr.nodes) == 0 char = word[index] char_node = curr.nodes.get(char) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted delete_curr = _delete(char_node, word, index + 1) if delete_curr: del curr.nodes[char] return len(curr.nodes) == 0 return delete_curr _delete(self, word, 0) def print_words(node: TrieNode, word: str) -> None: """ Prints all the words in a Trie :param node: root node of Trie :param word: Word variable should be empty at start :return: None """ if node.is_leaf: print(word, end=" ") for key, value in node.nodes.items(): print_words(value, word + key) def test_trie() -> bool: words = "banana bananas bandana band apple all beast".split() root = TrieNode() root.insert_many(words) # print_words(root, "") assert all(root.find(word) for word in words) assert root.find("banana") assert not root.find("bandanas") assert not root.find("apps") assert root.find("apple") assert root.find("all") root.delete("all") assert not root.find("all") root.delete("banana") assert not root.find("banana") assert root.find("bananas") return True def print_results(msg: str, passes: bool) -> None: print(str(msg), "works!" if passes else "doesn't work :(") def pytests() -> None: assert test_trie() def main() -> None: """ >>> pytests() """ print_results("Testing trie functionality", test_trie()) if __name__ == "__main__": main()
SimCalorimetry/EcalElectronicsEmulation/python/EcalFEtoDigi_cfi.py
ckamtsikis/cmssw
852
133310
import FWCore.ParameterSet.Config as cms

tccFlatToDigi = cms.EDProducer("EcalFEtoDigi",
    FileEventOffset = cms.untracked.int32(0),
    UseIdentityLUT = cms.untracked.bool(False),
    SuperModuleId = cms.untracked.int32(-1),
    debugPrintFlag = cms.untracked.bool(False),
    FlatBaseName = cms.untracked.string('ecal_tcc_')
)
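A hedged sketch of how a _cfi fragment like this one is usually consumed from a CMSSW job configuration; the process name and the clone override are illustrative, not part of this package:

import FWCore.ParameterSet.Config as cms
from SimCalorimetry.EcalElectronicsEmulation.EcalFEtoDigi_cfi import tccFlatToDigi

process = cms.Process("EMUL")  # hypothetical process name
process.tccFlatToDigi = tccFlatToDigi.clone(
    debugPrintFlag = cms.untracked.bool(True)  # override one untracked parameter
)
process.p = cms.Path(process.tccFlatToDigi)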
pyabsa/core/apc/dataset_utils/data_utils_for_inferring.py
yangheng95/PyABSA
199
133331
# -*- coding: utf-8 -*-
# file: data_utils_for_inferring.py
# time: 2021/4/22 0022
# author: yangheng <<EMAIL>>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.

import numpy as np
from pyabsa.utils.pyabsa_utils import check_and_fix_labels, validate_example
from torch.utils.data import Dataset
from tqdm import tqdm

from .apc_utils import (build_sentiment_window, build_spc_mask_vec, load_apc_datasets,
                        prepare_input_for_apc, LABEL_PADDING, configure_spacy_model)
from .apc_utils_for_dlcf_dca import prepare_input_for_dlcf_dca, configure_dlcf_spacy_model


class ABSADataset(Dataset):

    def __init__(self, tokenizer, opt):
        configure_spacy_model(opt)
        self.tokenizer = tokenizer
        self.opt = opt
        self.all_data = []

    def parse_sample(self, text):
        _text = text
        samples = []

        if '!sent!' not in text:
            text += '!sent!'
        text, _, ref_sent = text.partition('!sent!')
        ref_sent = ref_sent.split(',') if ref_sent else None
        text = '[PADDING] ' + text + ' [PADDING]'
        splits = text.split('[ASP]')

        if ref_sent and int((len(splits) - 1) / 2) == len(ref_sent):
            for i in range(0, len(splits) - 1, 2):
                sample = text.replace('[ASP]' + splits[i + 1] + '[ASP]',
                                      '[TEMP]' + splits[i + 1] + '[TEMP]', 1).replace('[ASP]', '')
                sample += ' !sent! ' + str(ref_sent[int(i / 2)])
                samples.append(sample.replace('[TEMP]', '[ASP]'))
        elif not ref_sent or int((len(splits) - 1) / 2) != len(ref_sent):
            if not ref_sent:
                print(_text, ' -> no reference sentiment found')
            else:
                print(_text, ' -> unequal numbers of reference sentiments and aspects, ignoring the reference sentiments.')
            for i in range(0, len(splits) - 1, 2):
                sample = text.replace('[ASP]' + splits[i + 1] + '[ASP]',
                                      '[TEMP]' + splits[i + 1] + '[TEMP]', 1).replace('[ASP]', '')
                samples.append(sample.replace('[TEMP]', '[ASP]'))
        else:
            raise ValueError('Invalid Input:{}'.format(text))

        return samples

    def prepare_infer_sample(self, text: str):
        self.process_data(self.parse_sample(text))

    def prepare_infer_dataset(self, infer_file, ignore_error):
        lines = load_apc_datasets(infer_file)
        samples = []
        for sample in lines:
            if sample:
                samples.extend(self.parse_sample(sample))
        self.process_data(samples, ignore_error)

    def process_data(self, samples, ignore_error=True):
        all_data = []

        label_set = set()
        ex_id = 0

        if len(samples) > 1:
            it = tqdm(samples, postfix='building word indices...')
        else:
            it = samples
        for i, text in enumerate(it):
            try:
                # handle empty lines in the inference dataset
                if text is None or '' == text.strip():
                    raise RuntimeError('Invalid Input!')

                # check for given polarity
                if '!sent!' in text:
                    text, polarity = text.split('!sent!')[0].strip(), text.split('!sent!')[1].strip()
                    polarity = polarity if polarity else LABEL_PADDING
                    text = text.replace('[PADDING]', '')
                else:
                    polarity = str(LABEL_PADDING)

                # strip the '[PADDING]' guards added in parse_sample; they protect
                # aspects sitting at the beginning or end of a sentence
                text_left, aspect, text_right = text.split('[ASP]')
                text_left = text_left.replace('[PADDING] ', '')
                text_right = text_right.replace(' [PADDING]', '')
                text = text_left + ' ' + aspect + ' ' + text_right

                prepared_inputs = prepare_input_for_apc(self.opt, self.tokenizer,
                                                        text_left, text_right, aspect,
                                                        input_demands=self.opt.inputs_cols)

                text_raw = prepared_inputs['text_raw']
                aspect = prepared_inputs['aspect']
                aspect_position = prepared_inputs['aspect_position']
                text_bert_indices = prepared_inputs['text_bert_indices']
                text_raw_bert_indices = prepared_inputs['text_raw_bert_indices']
                aspect_bert_indices = prepared_inputs['aspect_bert_indices']
                lcfs_vec = prepared_inputs['lcfs_vec']
                lcf_vec = prepared_inputs['lcf_vec']

                validate_example(text_raw, aspect, polarity)

                if self.opt.model_name == 'dlcf_dca_bert' or self.opt.model_name == 'dlcfs_dca_bert':
                    configure_dlcf_spacy_model(self.opt)
                    prepared_inputs = prepare_input_for_dlcf_dca(self.opt, self.tokenizer,
                                                                 text_left, text_right, aspect)
                    dlcf_vec = prepared_inputs['dlcf_cdm_vec'] if self.opt.lcf == 'cdm' else prepared_inputs['dlcf_cdw_vec']
                    dlcfs_vec = prepared_inputs['dlcfs_cdm_vec'] if self.opt.lcf == 'cdm' else prepared_inputs['dlcfs_cdw_vec']
                    depend_vec = prepared_inputs['depend_vec']
                    depended_vec = prepared_inputs['depended_vec']
                data = {
                    'ex_id': ex_id,
                    'text_raw': text_raw,
                    'aspect': aspect,
                    'aspect_position': aspect_position,
                    'lca_ids': lcf_vec,  # the lca indices are the same as the refactored CDM (lcf != CDW or Fusion) lcf vec
                    'lcf_vec': lcf_vec if 'lcf_vec' in self.opt.inputs_cols else 0,
                    'lcfs_vec': lcfs_vec if 'lcfs_vec' in self.opt.inputs_cols else 0,
                    'dlcf_vec': dlcf_vec if 'dlcf_vec' in self.opt.inputs_cols else 0,
                    'dlcfs_vec': dlcfs_vec if 'dlcfs_vec' in self.opt.inputs_cols else 0,
                    'depend_vec': depend_vec if 'depend_vec' in self.opt.inputs_cols else 0,
                    'depended_vec': depended_vec if 'depended_vec' in self.opt.inputs_cols else 0,
                    'spc_mask_vec': build_spc_mask_vec(self.opt, text_raw_bert_indices) if 'spc_mask_vec' in self.opt.inputs_cols else 0,
                    'text_bert_indices': text_bert_indices if 'text_bert_indices' in self.opt.inputs_cols else 0,
                    'aspect_bert_indices': aspect_bert_indices if 'aspect_bert_indices' in self.opt.inputs_cols else 0,
                    'text_raw_bert_indices': text_raw_bert_indices if 'text_raw_bert_indices' in self.opt.inputs_cols else 0,
                    'polarity': polarity,
                }
                label_set.add(polarity)
                ex_id += 1
                all_data.append(data)
            except Exception as e:
                if ignore_error:
                    print('Ignore error while processing: {} Error info:{}'.format(text, e))
                else:
                    raise RuntimeError('Catch Exception: {}, use ignore_error=True to remove error samples.'.format(e))

        self.opt.polarities_dim = len(label_set)

        if 'left_lcf_vec' in self.opt.inputs_cols or 'right_lcf_vec' in self.opt.inputs_cols \
                or 'left_lcfs_vec' in self.opt.inputs_cols or 'right_lcfs_vec' in self.opt.inputs_cols:
            all_data = build_sentiment_window(all_data, self.tokenizer, self.opt.similarity_threshold)
            for data in all_data:
                cluster_ids = []
                for pad_idx in range(self.opt.max_seq_len):
                    if pad_idx in data['cluster_ids']:
                        # print(data['polarity'])
                        cluster_ids.append(self.opt.label_to_index.get(
                            self.opt.index_to_label.get(data['polarity'], 'N.A.'), -999))
                    else:
                        cluster_ids.append(-100)
                        # cluster_ids.append(3)
                data['cluster_ids'] = np.asarray(cluster_ids, dtype=np.int64)
                data['side_ex_ids'] = np.array(0)
                data['aspect_position'] = np.array(0)
        else:
            for data in all_data:
                data['aspect_position'] = np.array(0)
        self.all_data = all_data
        return all_data

    def __getitem__(self, index):
        return self.all_data[index]

    def __len__(self):
        return len(self.all_data)
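A hedged sketch of the inference input format parse_sample expects (the sentences are invented for illustration): each aspect is wrapped in [ASP] tags, and an optional comma-separated reference sentiment list follows !sent!:

# one aspect with a reference label
text = 'The [ASP]battery life[ASP] is impressive !sent! Positive'
# two aspects, two labels; parse_sample yields one sample per aspect
text2 = 'Great [ASP]screen[ASP] but weak [ASP]speakers[ASP] !sent! Positive,Negative'
# dataset = ABSADataset(tokenizer, opt)  # tokenizer/opt supplied by the caller
# dataset.prepare_infer_sample(text)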
recipes/Python/52553_RoadRunner_recursively_updates_contents/recipe-52553.py
tdiprima/code
2,023
133334
#!/usr/bin/env python
# $Id. RoadRunner.py Thu Apr 5 10:49:48 EST 2001 JuanCarlos.Leon $

import os
import string
import re
import sys

_files_lst = []

def pluggit(readlines, regx, addstrng):
    """ Append a string after each match of the regex pattern """
    cregex = re.compile(regx)
    updated = []
    for eachLine in readlines:
        match = cregex.search(eachLine)
        if match:
            # Rebuild the line instead of mutating the list while iterating
            # over it (the old remove/insert pair also misbehaved on
            # duplicate lines, since list.index finds the first copy).
            rplacement = match.group() + addstrng
            eachLine = re.sub(regx, rplacement, eachLine)
        updated.append(eachLine)
    return updated

def compose(strng, regx, listoffiles=[]):
    log = open('log.txt', 'w')
    for xfile in listoffiles:
        # Open file for read
        readlines = open(xfile, 'r').readlines()
        buffer = pluggit(readlines, regx, strng)
        # We are good to open the file for writing
        write_file = open(xfile, 'w')
        # Write all buffered lines back to the original file
        for line in buffer:
            write_file.write(line)
        write_file.close()
        log.write('file %s updated\n' % (xfile))
    log.close()

def fetch(path=None, ext='.html'):
    if not path:
        path = os.getcwd()
    if os.path.isdir(path):
        lst = os.listdir(path)
        if lst:
            for each in lst:
                spath = path + '/' + each
                if os.path.isfile(spath):
                    fileobj = string.find(spath, ext)
                    if fileobj != -1:
                        _files_lst.append(spath)
                # Recurse into subdirectories, keeping the requested extension
                # (the original recursed without ext and fell back to '.html').
                fetch(spath, ext)

def RoadRunner():
    while 1:
        strngToInsert = raw_input('\nEnter string to incorporate : ')
        if strngToInsert:
            break
    file_extension = raw_input('Enter extension of the files to be updated [.html] : ')
    if not file_extension:
        file_extension = '.html'
    while 1:
        regexp = raw_input('Enter regex string [e.g: <[Bb][oO][dD][yY].*>] : ')
        if regexp:
            break
    while 1:
        update_path = raw_input('Enter path to update [%s] : ' % (os.getcwd()))
        if not update_path:
            update_path = os.getcwd()
        if os.path.isdir(update_path):
            break
    print "\nYou have entered the following : \n"
    print "String to incorporate: %s" % (strngToInsert)
    print "Regex string: %s" % (regexp)
    print "Path to update: %s\n" % (update_path)
    ok = raw_input("Is this ok?[Y/n] : ")
    if not ok:
        ok = 'Y'
    if string.lower(ok) != 'y':
        sys.exit(1)
    else:
        # Pass the extension the user asked for (it was previously ignored).
        fetch(update_path, file_extension)
        compose(strngToInsert, regexp, _files_lst)

if __name__ == '__main__':
    RoadRunner()
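A quick demo of what pluggit does to matching lines (sample HTML invented for illustration); run under Python 2 like the recipe itself:

lines = ['<html>\n', '<BODY bgcolor="#fff">\n', '<p>hi</p>\n']
print pluggit(lines, '<[Bb][oO][dD][yY].*>', '<!-- updated -->')
# -> ['<html>\n', '<BODY bgcolor="#fff"><!-- updated -->\n', '<p>hi</p>\n']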
Lib/test/test_frozen.py
arvindm95/unladen-swallow
2,293
133348
# Test the frozen module defined in frozen.c.

from test.test_support import captured_stdout, run_unittest
import unittest
import sys, os

class FrozenTests(unittest.TestCase):
    def test_frozen(self):
        with captured_stdout() as stdout:
            try:
                import __hello__
            except ImportError, x:
                self.fail("import __hello__ failed:" + str(x))
            try:
                import __phello__
            except ImportError, x:
                self.fail("import __phello__ failed:" + str(x))
            try:
                import __phello__.spam
            except ImportError, x:
                self.fail("import __phello__.spam failed:" + str(x))
            if sys.platform != "mac":  # On the Mac this import does succeed.
                try:
                    import __phello__.foo
                except ImportError:
                    pass
                else:
                    self.fail("import __phello__.foo should have failed")
        self.assertEquals(stdout.getvalue(),
                          'Hello world...\nHello world...\nHello world...\n')
        del sys.modules['__hello__']
        del sys.modules['__phello__']
        del sys.modules['__phello__.spam']

def test_main():
    run_unittest(FrozenTests)

if __name__ == '__main__':
    test_main()
AppServer/lib/django-1.4/tests/regressiontests/localflavor/mx/tests.py
loftwah/appscale
790
133351
<gh_stars>100-1000 # -*- coding: utf-8 -*- from __future__ import absolute_import from django.contrib.localflavor.mx.forms import (MXZipCodeField, MXRFCField, MXStateSelect, MXCURPField) from django.test import SimpleTestCase from .forms import MXPersonProfileForm class MXLocalFlavorTests(SimpleTestCase): def setUp(self): self.form = MXPersonProfileForm({ 'state': 'MIC', 'rfc': 'toma880125kv3', 'curp': 'toma880125hmnrrn02', 'zip_code': '58120', }) def test_get_display_methods(self): """Test that the get_*_display() methods are added to the model instances.""" place = self.form.save() self.assertEqual(place.get_state_display(), u'Michoacán') def test_errors(self): """Test that required MXFields throw appropriate errors.""" form = MXPersonProfileForm({ 'state': 'Invalid state', 'rfc': 'invalid rfc', 'curp': 'invalid curp', 'zip_code': 'xxx', }) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['state'], [u'Select a valid choice. Invalid state is not one of the available choices.']) self.assertEqual(form.errors['rfc'], [u'Enter a valid RFC.']) self.assertEqual(form.errors['curp'], [u'Ensure this value has at least 18 characters (it has 12).', u'Enter a valid CURP.']) self.assertEqual(form.errors['zip_code'], [u'Enter a valid zip code in the format XXXXX.']) def test_field_blank_option(self): """Test that the empty option is there.""" state_select_html = """\ <select name="state" id="id_state"> <option value="">---------</option> <option value="AGU">Aguascalientes</option> <option value="BCN">Baja California</option> <option value="BCS">Baja California Sur</option> <option value="CAM">Campeche</option> <option value="CHH">Chihuahua</option> <option value="CHP">Chiapas</option> <option value="COA">Coahuila</option> <option value="COL">Colima</option> <option value="DIF">Distrito Federal</option> <option value="DUR">Durango</option> <option value="GRO">Guerrero</option> <option value="GUA">Guanajuato</option> <option value="HID">Hidalgo</option> <option value="JAL">Jalisco</option> <option value="MEX">Estado de México</option> <option value="MIC" selected="selected">Michoacán</option> <option value="MOR">Morelos</option> <option value="NAY">Nayarit</option> <option value="NLE">Nuevo León</option> <option value="OAX">Oaxaca</option> <option value="PUE">Puebla</option> <option value="QUE">Querétaro</option> <option value="ROO">Quintana Roo</option> <option value="SIN">Sinaloa</option> <option value="SLP">San Luis Potosí</option> <option value="SON">Sonora</option> <option value="TAB">Tabasco</option> <option value="TAM">Tamaulipas</option> <option value="TLA">Tlaxcala</option> <option value="VER">Veracruz</option> <option value="YUC">Yucatán</option> <option value="ZAC">Zacatecas</option> </select>""" self.assertHTMLEqual(str(self.form['state']), state_select_html) def test_MXStateSelect(self): f = MXStateSelect() out = u'''<select name="state"> <option value="AGU">Aguascalientes</option> <option value="BCN">Baja California</option> <option value="BCS">Baja California Sur</option> <option value="CAM">Campeche</option> <option value="CHH">Chihuahua</option> <option value="CHP">Chiapas</option> <option value="COA">Coahuila</option> <option value="COL">Colima</option> <option value="DIF">Distrito Federal</option> <option value="DUR">Durango</option> <option value="GRO">Guerrero</option> <option value="GUA">Guanajuato</option> <option value="HID">Hidalgo</option> <option value="JAL">Jalisco</option> <option value="MEX">Estado de México</option> <option value="MIC" 
selected="selected">Michoacán</option> <option value="MOR">Morelos</option> <option value="NAY">Nayarit</option> <option value="NLE">Nuevo León</option> <option value="OAX">Oaxaca</option> <option value="PUE">Puebla</option> <option value="QUE">Querétaro</option> <option value="ROO">Quintana Roo</option> <option value="SIN">Sinaloa</option> <option value="SLP">San Luis Potosí</option> <option value="SON">Sonora</option> <option value="TAB">Tabasco</option> <option value="TAM">Tamaulipas</option> <option value="TLA">Tlaxcala</option> <option value="VER">Veracruz</option> <option value="YUC">Yucatán</option> <option value="ZAC">Zacatecas</option> </select>''' self.assertHTMLEqual(f.render('state', 'MIC'), out) def test_MXZipCodeField(self): error_format = [u'Enter a valid zip code in the format XXXXX.'] valid = { '58120': u'58120', '58502': u'58502', '59310': u'59310', '99999': u'99999', } invalid = { '17000': error_format, '18000': error_format, '19000': error_format, '00000': error_format, } self.assertFieldOutput(MXZipCodeField, valid, invalid) def test_MXRFCField(self): error_format = [u'Enter a valid RFC.'] error_checksum = [u'Invalid checksum for RFC.'] valid = { 'MoFN641205eX5': u'MOFN641205EX5', 'ICa060120873': u'ICA060120873', 'eUcG751104rT0': u'EUCG751104RT0', 'GME08100195A': u'GME08100195A', 'AA&060524KX5': u'AA&060524KX5', 'CAÑ0708045P7': u'CAÑ0708045P7', 'aaa000101aa9': u'AAA000101AA9', } invalid = { 'MED0000000XA': error_format, '0000000000XA': error_format, 'AAA000000AA6': error_format, # Dates 'XXX880002XXX': error_format, 'XXX880200XXX': error_format, 'XXX880132XXX': error_format, 'XXX880230XXX': error_format, 'XXX880431XXX': error_format, # Incorrect checksum 'MOGR650524E73': error_checksum, 'HVA7810058F1': error_checksum, 'MoFN641205eX2': error_checksum, 'ICa060120871': error_checksum, 'eUcG751104rT7': error_checksum, 'GME081001955': error_checksum, 'AA&060524KX9': error_checksum, 'CAÑ0708045P2': error_checksum, } self.assertFieldOutput(MXRFCField, valid, invalid) def test_MXCURPField(self): error_format = [u'Enter a valid CURP.'] error_checksum = [u'Invalid checksum for CURP.'] valid = { 'AaMG890608HDFLJL00': u'AAMG890608HDFLJL00', 'BAAd890419HMNRRV07': u'BAAD890419HMNRRV07', 'VIAA900930MMNClL08': u'VIAA900930MMNCLL08', 'HEGR891009HMNRRD09': u'HEGR891009HMNRRD09', 'MARR890512HMNRMN09': u'MARR890512HMNRMN09', 'MESJ890928HMNZNS00': u'MESJ890928HMNZNS00', 'BAAA890317HDFRLL03': u'BAAA890317HDFRLL03', 'TOMA880125HMNRRNO2': u'TOMA880125HMNRRNO2', 'OOMG890727HMNRSR06': u'OOMG890727HMNRSR06', 'AAAA000101HDFCCC09': u'AAAA000101HDFCCC09', } invalid = { 'AAAA000000HDFCCC09': error_format, 'AAAA000000HDFAAA03': error_format, 'AAAA000000HXXCCC08': error_format, 'AAAA000000XMNCCC02': error_format, 'HEGR891009HMNRRD0A': error_format, 'MARR890512HMNRMN0A': error_format, 'AaMG890608HDFLJL01': error_checksum, 'BAAd890419HMNRRV08': error_checksum, 'VIAA900930MMNClL09': error_checksum, 'MESJ890928HMNZNS01': error_checksum, 'BAAA890317HDFRLL04': error_checksum, 'TOMA880125HMNRRNO3': error_checksum, 'OOMG890727HMNRSR07': error_checksum, } self.assertFieldOutput(MXCURPField, valid, invalid)
AutotestWebD/all_urls/adminUrl.py
yangjourney/sosotest
422
133366
from django.conf.urls import url from apps.myadmin.views import login, user, team, role, teamUserRelation, userRole, adminUser, businessLine, \ interfaceModule, interfacePermission, moduleManage, source, changeLog, businessLineModule, configService, \ jiraModule, modulePlatform, jiraBusinessLine, jiraBusinessLinePlatform, configUri, configHttp, httpInterfaceDebug, \ httpTestcaseDebug, exePython, standardTask, openApiBusinessLine, openApiUri, unitTestService, uiMobileServer, \ versionManage, userLog, adminManagePermission, standardEnv, cacheManage, webportalBusinessLine, dataStorage,adminServiceConf '''url规则:myadmin/***/check, 中间件会过滤中间的module''' urlpatterns = [ url(r'^myadmin/$', login.loginPage, name="admin_login"), url(r'^myadmin/login$', login.loginPage,name="admin_login"), url(r'^myadmin/doLogin$', login.doLogin,name="admin_doLogin"), url(r'^myadmin/changePassword$', login.changePassword, name="admin_changePassword"), url(r'^myadmin/logout$', login.logout, name="admin_logout"), url(r'^myadmin/home$', login.home, name="admin_home"), #user url(r'^myadmin/user/check$', user.userCheckPage, name="admin_user_check"), url(r'^myadmin/user/getUserSubPage$', user.getUser, name="admin_user_get_user_sub_page"), url(r'^myadmin/user/addUser$', user.addUser, name="admin_add_user"), url(r'^myadmin/user/getUserForId$', user.getUserForId, name="getUserForId"), url(r'^myadmin/user/editUser$', user.editUser, name="admin_edit_user"), url(r'^myadmin/user/delUser$', user.delUser, name="admin_del_user"), url(r'^myadmin/user/addPermissionsToUser$', user.addPermissionsToUser, name="admin_add_permissions_to_user"), url(r'^myadmin/user/addPermissionsToAllUsers$', user.addPermissionsToAllUsers, name="admin_add_permissions_to_all_users"), url(r'^myadmin/interfacePermission/getUserPermissionKeys$', user.getUserPermission), #team url(r'^myadmin/team/check$', team.teamCheckPage, name="admin_team_check"), url(r'^myadmin/team/getTeamSubPage$', team.getTeam, name="admin_team_get_team_sub_page"), url(r'^myadmin/team/addTeam$', team.addTeam, name="admin_add_Team"), url(r'^myadmin/team/getTeamForId$', team.getTeamForId, name="getTeamForId"), url(r'^myadmin/team/editTeam$', team.editTeam, name="admin_edit_team"), url(r'^myadmin/team/delTeam$', team.delTeam, name="admin_del_team"), url(r'^myadmin/team/resetTeam$', team.resetTeam, name="admin_reset_team"), url(r'^myadmin/team/getAllUsers$', team.getAllUsers, name="admin_get_all_users"), url(r'^myadmin/team/addUsersToTeam$', team.addUsersToTeam, name="admin_add_users_to_team"), url(r'^myadmin/team/getAllSelectedUsers$', team.getAllSelectedUsers, name="admin_get_all_selected_users"), url(r'^myadmin/team/deleteSelectedUsers$', team.deleteSelectedUsers, name="admin_delete_selected_users"), url(r'^myadmin/team/addPermissionsToTeam$', team.addPermissionsToTeam, name="admin_add_permissions_to_team"), url(r'^myadmin/team/getTeammates$', team.getTeammates, name="admin_get_teammates"), url(r'^myadmin/team/tansferData$', team.tansferData, name="admin_tansferData"), url(r'^myadmin/interfacePermission/getTeamPermissionKeys$', team.getTeamPermission), url(r'^myadmin/interfacePermission/reload$', team.permissionReload), #role url(r'^myadmin/role/check$', role.roleCheckPage, name="admin_role_check"), url(r'^myadmin/role/getRoleSubPage$', role.getRole, name="admin_role_get_role_sub_page"), url(r'^myadmin/role/addRole$', role.addRole, name="admin_add_Role"), url(r'^myadmin/role/getRoleForId$', role.getRoleForId, name="getRoleForId"), url(r'^myadmin/role/editRole$', role.editRole, 
name="admin_edit_role"), url(r'^myadmin/role/delRole$', role.delRole, name="admin_del_role"), url(r'^myadmin/role/resetRole$', role.resetRole, name="admin_reset_role"), url(r'^myadmin/role/addUsersToRole$', role.addUsersToRole, name="admin_add_user_to_role"), #adminManagePermission url(r'^myadmin/adminManagePermission/check$', adminManagePermission.permissionCheckPage, name="admin_adminManagePermission_check"), url(r'^myadmin/adminManagePermission/getPermissionSubPage$', adminManagePermission.getPermission, name="admin_permission_get_permission_sub_page"), url(r'^myadmin/adminManagePermission/addPermission$', adminManagePermission.addPermission, name="admin_add_Permission"), url(r'^myadmin/adminManagePermission/getPermissionForId$', adminManagePermission.getPermissionForId, name="getPermissionForId"), url(r'^myadmin/adminManagePermission/editPermission$', adminManagePermission.editPermission, name="admin_edit_permission"), url(r'^myadmin/adminManagePermission/delPermission$', adminManagePermission.delPermission, name="admin_del_permission"), url(r'^myadmin/adminManagePermission/resetPermission$', adminManagePermission.resetPermission, name="admin_reset_permission"), url(r'^myadmin/adminManagePermission/getAllPermissions$', adminManagePermission.getAllPermissions, name="admin_get_all_permissions"), url(r'^myadmin/adminManagePermission/getAllSelectedPermissions$', adminManagePermission.getAllSelectedPermissions, name="admin_get_all_selected_permissions"), url(r'^myadmin/adminManagePermission/getAllSelectedTeamPermissions$', adminManagePermission.getAllSelectedTeamPermissions, name="admin_get_all_selected_team_permissions"), url(r'^myadmin/adminManagePermission/getAllUsersSelectedPermissions$', adminManagePermission.getAllUsersSelectedPermissions, name="admin_get_all_users_selected_team_permissions"), #teamUser url(r'^myadmin/team/getTeammateSubPage$', teamUserRelation.getAllTeammates, name="admin_team_get_teammate_sub_page"), #userRole url(r'^myadmin/userRole/userRoleCheckPage$', userRole.userRoleCheckPage, name="admin_user_role_check_page"), url(r'^myadmin/userRole/getUserRoleSubPage$', userRole.getUserRole, name="admin_team_get_team_sub_page"), url(r'^myadmin/userRole/setTeamLeader$', userRole.setTeamLeader, name="admin_set_team_leader"), url(r'^myadmin/userRole/delTeamLeader$', userRole.delTeamLeader, name="admin_del_team_leader"), #adminUser url(r'^myadmin/admin/check$', adminUser.adminUserCheckPage, name="admin_admin_user_check"), url(r'^myadmin/admin/getAdminUserSubPage$', adminUser.getAdminUser, name="admin_get_admin_user_sub_page"), url(r'^myadmin/admin/addAdminUser$', adminUser.addAdminUser, name="admin_add_adminUser"), url(r'^myadmin/admin/getAdminUserForId$', adminUser.getAdminUserForId, name="admin_get_adminUser_for_id"), url(r'^myadmin/admin/editAdminUser$', adminUser.editAdminUser, name="admin_edit_adminUser"), url(r'^myadmin/admin/delAdminUser$', adminUser.delAdminUser, name="admin_del_adminUser"), url(r'^myadmin/admin/resetAdminUser$', adminUser.resetAdminUser, name="admin_reset_adminUser"), url(r'^myadmin/admin/addPermissionsToUser$', adminUser.addPermissionsToUser, name="admin_add_permissions_to_user"), #businessLine url(r'^myadmin/businessLine/check$', businessLine.businessLineCheckPage, name="admin_business_line_check"), url(r'^myadmin/businessLine/getBusinessLineSubPage$', businessLine.getBusinessLine, name="admin_get_business_line_sub_page"), url(r'^myadmin/businessLine/addBusinessLine$', businessLine.addBusinessLine, name="admin_add_business_line"), 
url(r'^myadmin/businessLine/getBusinessLineForId$', businessLine.getBusinessLineForId, name="admin_get_businessLine_for_id"), url(r'^myadmin/businessLine/editBusinessLine$', businessLine.editBusinessLine, name="admin_edit_businessLine"), url(r'^myadmin/businessLine/delBusinessLine$', businessLine.delBusinessLine, name="admin_del_businessLine"), url(r'^myadmin/businessLine/resetBusinessLine$', businessLine.resetBusinessLine, name="admin_reset_businessLine"), # webportalBusinessLine url(r'^myadmin/webportalBusinessLine/check$', webportalBusinessLine.businessLineCheckPage, name="admin_webportalBusinessLine_line_check"), url(r'^myadmin/webportalBusinessLine/getBusinessLineSubPage$', webportalBusinessLine.getBusinessLine, name="admin_get_webportalBusinessLine_line_sub_page"), url(r'^myadmin/webportalBusinessLine/addBusinessLine$', webportalBusinessLine.addBusinessLine, name="admin_add_webportalBusinessLine_line"), url(r'^myadmin/webportalBusinessLine/getBusinessLineForId$', webportalBusinessLine.getBusinessLineForId, name="admin_get_webportalBusinessLine_for_id"), url(r'^myadmin/webportalBusinessLine/editBusinessLine$', webportalBusinessLine.editBusinessLine, name="admin_edit_webportalBusinessLine"), url(r'^myadmin/webportalBusinessLine/delBusinessLine$', webportalBusinessLine.delBusinessLine, name="admin_del_webportalBusinessLine"), url(r'^myadmin/webportalBusinessLine/resetBusinessLine$', webportalBusinessLine.resetBusinessLine, name="admin_reset_webportalBusinessLine"), url(r'^myadmin/webportalBusinessLine/getAllBusinessLines$', webportalBusinessLine.getAllBusinessLines, name="admin_get_allBusinessLines"), #interfaceModule # url(r'^myadmin/interfaceModule/check$', interfaceModule.interfaceModuleCheckPage, name="admin_interface_module_check"), # url(r'^myadmin/interfaceModule/getInterfaceModuleSubPage$', interfaceModule.getInterfaceModule, name="admin_get_interface_module_sub_page"), # url(r'^myadmin/interfaceModule/addInterfaceModule$', interfaceModule.addInterfaceModule, name="admin_add_interface_module"), # url(r'^myadmin/interfaceModule/getInterfaceModuleForId$', interfaceModule.getInterfaceModuleForId, name="admin_get_interface_module_for_id"), # url(r'^myadmin/interfaceModule/editInterfaceModule$', interfaceModule.editInterfaceModule, name="admin_edit_interfaceModule"), # url(r'^myadmin/interfaceModule/delInterfaceModule$', interfaceModule.delInterfaceModule, name="admin_del_interfaceModule"), # url(r'^myadmin/interfaceModule/resetInterfaceModule$', interfaceModule.resetInterfaceModule, name="admin_reset_interfaceModule"), #interfacePermission url(r'^myadmin/interfacePermission/check$', interfacePermission.interfacePermissionCheckPage, name="admin_interface_permission_check"), url(r'^myadmin/interfacePermission/getInterfacePermissionSubPage$', interfacePermission.getInterfacePermission, name="admin_get_interface_permission_sub_page"), url(r'^myadmin/interfacePermission/addInterfacePermission$', interfacePermission.addInterfacePermission, name="admin_add_interface_permission"), url(r'^myadmin/interfacePermission/getInterfacePermissionForId$', interfacePermission.getInterfacePermissionForId, name="admin_get_interface_permission_for_id"), url(r'^myadmin/interfacePermission/editInterfacePermission$', interfacePermission.editInterfacePermission, name="admin_edit_interfacePermission"), url(r'^myadmin/interfacePermission/delInterfacePermission$', interfacePermission.delInterfacePermission, name="admin_del_interfacePermission"), url(r'^myadmin/interfacePermission/resetInterfacePermission$', 
interfacePermission.resetInterfacePermission, name="admin_del_interfacePermission"), # url(r'^myadmin/interfacePermission/getAllInterface$', interfacePermission.getAllInterface, name="admin_get_allInterface"), url(r'^myadmin/interfacePermission/getAllPermissionKeys$', interfacePermission.getAllPermissionKeys, name="admin_get_allPermissionKeys"), #module url(r'^myadmin/moduleManage/check$', moduleManage.moduleManageCheckPage, name="admin_module_manage_check"), url(r'^myadmin/moduleManage/getModuleManageSubPage$', moduleManage.getModuleManage, name="admin_get_module_manage_sub_page"), url(r'^myadmin/moduleManage/addModuleManage$', moduleManage.addModuleManage, name="admin_add_module_manage"), url(r'^myadmin/moduleManage/getModuleManageForId$', moduleManage.getModuleManageForId, name="admin_get_module_manage_for_id"), url(r'^myadmin/moduleManage/editModuleManage$', moduleManage.editModuleManage, name="admin_edit_moduleManage"), url(r'^myadmin/moduleManage/delModuleManage$', moduleManage.delModuleManage, name="admin_del_moduleManage"), url(r'^myadmin/moduleManage/resetModuleManage$', moduleManage.resetModuleManage, name="admin_del_moduleManage"), # source url(r'^myadmin/source/check$', source.sourceCheckPage, name="admin_source_check"), url(r'^myadmin/source/getSourceSubPage$', source.getSource, name="admin_get_source_sub_page"), url(r'^myadmin/source/addSource$', source.addSource, name="admin_add_source"), url(r'^myadmin/source/getSourceForId$', source.getSourceForId, name="admin_get_source_for_id"), url(r'^myadmin/source/editSource$', source.editSource, name="admin_edit_source"), url(r'^myadmin/source/delSource$', source.delSource, name="admin_del_source"), url(r'^myadmin/source/resetSource$', source.resetSource, name="admin_reset_source"), #changeLog url(r'^myadmin/changeLog/check$', changeLog.changeLogCheckPage, name="admin_changeLog_check"), url(r'^myadmin/changeLog/getChangeLogSubPage$', changeLog.getChangeLog, name="admin_get_changeLog_sub_page"), url(r'^myadmin/changeLog/getChangeLogDataForId$', changeLog.getChangeLogDataForId, name="admin_get_changeLogData_for_id"), #businessLineModule url(r'^myadmin/businessLineModule/check$', businessLineModule.businessLineModuleCheckPage, name="admin_businessLine_module_check"), url(r'^myadmin/businessLineModule/getBusinessLineModule$', businessLineModule.getBusinessLineModule, name="admin_get_businessLine_module"), url(r'^myadmin/businessLineModule/addBusinessLineModule$', businessLineModule.addBusinessLineModule, name="admin_add_businessLine_module"), url(r'^myadmin/businessLineModule/getAllBusinessLines$', businessLineModule.getAllBusinessLines, name="admin_get_all_businessLine"), url(r'^myadmin/businessLineModule/getAllModuleNames$', businessLineModule.getAllModuleNames, name="admin_get_all_moduleNames"), url(r'^myadmin/businessLineModule/getBusinessLineModuleForId$', businessLineModule.getBusinessLineModuleForId, name="admin_get_businessLineModule_for_id"), url(r'^myadmin/businessLineModule/delBusinessLineModule$', businessLineModule.delBusinessLineModule, name="admin_del_businessLineModule"), url(r'^myadmin/businessLineModule/editBusinessLineModule$', businessLineModule.editBusinessLineModule, name="admin_edit_businessLineModule"), url(r'^myadmin/businessLineModule/getBusinessLineId$', businessLineModule.getBusinessLineId, name="admin_get_businessLineId"), url(r'^myadmin/businessLineModule/getModuleId$', businessLineModule.getModuleId, name="admin_get_moduleId"), #configService url(r'^myadmin/configService/check$', 
configService.configServiceCheckPage, name="admin_configService_check"), url(r'^myadmin/configService/getConfigServiceSubPage$', configService.getConfigService, name="admin_get_configService_sub_page"), url(r'^myadmin/configService/addConfigService$', configService.addConfigService, name="admin_add_configService"), url(r'^myadmin/configService/getConfigServiceForId$', configService.getConfigServiceForId, name="admin_get_configService_for_id"), url(r'^myadmin/configService/editConfigService$', configService.editConfigService, name="admin_get_configService_for_id"), url(r'^myadmin/configService/delConfigService$', configService.delConfigService, name="admin_del_configService"), url(r'^myadmin/configService/resetConfigService$', configService.resetConfigService, name="admin_reset_configService"), # configUri url(r'^myadmin/configUri/check$', configUri.configUriCheckPage, name="admin_configURI_check"), url(r'^myadmin/configUri/getConfigUriSubPage$', configUri.getConfigUri, name="admin_get_configUri_sub_page"), url(r'^myadmin/configUri/addConfigUri$', configUri.addConfigUri, name="admin_add_configUri"), url(r'^myadmin/configUri/getConfigUriForId$', configUri.getConfigUriForId, name="admin_get_configUri_for_id"), url(r'^myadmin/configUri/editConfigUri$', configUri.editConfigUri, name="admin_edit_configUri"), url(r'^myadmin/configUri/delConfigUri$', configUri.delConfigUri, name="admin_del_configUri"), url(r'^myadmin/configUri/resetConfigUri$', configUri.resetConfigUri, name="admin_reset_configUri"), #jiraModule url(r'^myadmin/jiraModule/check$', jiraModule.jiraModuleCheckPage, name="admin_jiraModule_check"), url(r'^myadmin/jiraModule/getJiraModuleSubPage$', jiraModule.getJiraModule, name="admin_get_jiraModule_sub_page"), url(r'^myadmin/jiraModule/addJiraModule$', jiraModule.addJiraModule, name="admin_add_jiraModule"), url(r'^myadmin/jiraModule/getJiraModuleForId$', jiraModule.getJiraModuleForId, name="admin_get_jiraModule_for_id"), url(r'^myadmin/jiraModule/editJiraModule$', jiraModule.editJiraModule, name="admin_get_jiraModule_for_id"), url(r'^myadmin/jiraModule/delJiraModule$', jiraModule.delJiraModule, name="admin_del_jiraModule"), url(r'^myadmin/jiraModule/resetJiraModule$', jiraModule.resetJiraModule, name="admin_reset_jiraModule"), #modulePlatform url(r'^myadmin/modulePlatform/check$', modulePlatform.modulePlatformCheckPage, name="admin_modulePlatform_check"), url(r'^myadmin/modulePlatform/getModulePlatform$', modulePlatform.getModulePlatform, name="admin_get_modulePlatform_sub_page"), url(r'^myadmin/modulePlatform/getAllJiraModules$', modulePlatform.getAllJiraModules, name="admin_get_all_jiraModules"), url(r'^myadmin/modulePlatform/addModulePlatform$', modulePlatform.addModulePlatform, name="admin_add_modulePlatform"), url(r'^myadmin/modulePlatform/getModulePlatformForId$', modulePlatform.getModulePlatformForId, name="admin_get_modulePlatform_for_id"), url(r'^myadmin/modulePlatform/editModulePlatform$', modulePlatform.editModulePlatform, name="admin_edit_modulePlatform"), url(r'^myadmin/modulePlatform/deleteModulePlatform$', modulePlatform.deleteModulePlatform, name="admin_del_modulePlatform"), url(r'^myadmin/modulePlatform/getJiraModuleId$', modulePlatform.getJiraModuleId, name="admin_get_jiraModuleId"), url(r'^myadmin/modulePlatform/getModuleId$', modulePlatform.getModuleId, name="admin_get_moduleId"), #jiraBusinessLine url(r'^myadmin/jiraBusinessLine/check$', jiraBusinessLine.jiraBusinessLineCheckPage, name="admin_jiraBusinessLine_check"), 
url(r'^myadmin/jiraBusinessLine/getJiraBusinessLineSubPage$', jiraBusinessLine.getJiraBusinessLine, name="admin_get_jiraBusinessLine_sub_page"), url(r'^myadmin/jiraBusinessLine/addJiraBusinessLine$', jiraBusinessLine.addJiraBusinessLine, name="admin_add_jiraBusinessLine"), url(r'^myadmin/jiraBusinessLine/getJiraBusinessLineForId$', jiraBusinessLine.getJiraBusinessLineForId, name="admin_get_jiraBusinessLine_for_id"), url(r'^myadmin/jiraBusinessLine/editJiraBusinessLine$', jiraBusinessLine.editJiraBusinessLine, name="admin_edit_jiraBusinessLine_for_id"), url(r'^myadmin/jiraBusinessLine/delJiraBusinessLine$', jiraBusinessLine.delJiraBusinessLine, name="admin_del_jiraBusinessLine"), url(r'^myadmin/jiraBusinessLine/resetJiraBusinessLine$', jiraBusinessLine.resetJiraBusinessLine, name="admin_reset_jiraBusinessLine"), # jiraBusinessLinePlatform url(r'^myadmin/jiraBusinessLinePlatform/check$', jiraBusinessLinePlatform.jiraBusinessLinePlatformCheckPage, name="admin_jiraBusinessLinePlatform_check"), url(r'^myadmin/jiraBusinessLinePlatform/getJiraBusinessLinePlatform$', jiraBusinessLinePlatform.getJiraBusinessLinePlatform, name="admin_get_jiraBusinessLinePlatform_sub_page"), url(r'^myadmin/jiraBusinessLinePlatform/getAllPlatformBusinessLines$', jiraBusinessLinePlatform.getAllPlatformBusinessLines, name="admin_get_all_platformBusinessLines"), url(r'^myadmin/jiraBusinessLinePlatform/getAllJiraBusinessLines$',jiraBusinessLinePlatform.getAllJiraBusinessLines, name="admin_get_all_jiraBusinessLines"), url(r'^myadmin/jiraBusinessLinePlatform/addJiraBusinessLinePlatform$', jiraBusinessLinePlatform.addJiraBusinessLinePlatform, name="admin_add_jiraBusinessLinePlatform"), url(r'^myadmin/jiraBusinessLinePlatform/getJiraBusinessLinePlatformForId$', jiraBusinessLinePlatform.getJiraBusinessLinePlatformForId, name="admin_get_jiraBusinessLinePlatform_for_id"), url(r'^myadmin/jiraBusinessLinePlatform/editJiraBusinessLinePlatform$', jiraBusinessLinePlatform.editJiraBusinessLinePlatform, name="admin_edit_jiraBusinessLinePlatform"), url(r'^myadmin/jiraBusinessLinePlatform/deleteJiraBusinessLinePlatform$', jiraBusinessLinePlatform.deleteJiraBusinessLinePlatform, name="admin_del_jiraBusinessLinePlatform"), url(r'^myadmin/jiraBusinessLinePlatform/getJiraBusinessLineId$', jiraBusinessLinePlatform.getJiraBusinessLineId, name="admin_get_jiraBusinessLineId"), #configHttp url(r'^myadmin/configHttp/check$', configHttp.configHttpCheckPage, name="admin_configHttp_check"), url(r'^myadmin/configHttp/getConfigHttpSubPage$', configHttp.getConfigHttp, name="admin_get_configHttp_sub_page"), url(r'^myadmin/configHttp/getAllServiceConfKeys$', configHttp.getAllServiceConfKeys, name="admin_get_all_serviceConfKeys"), url(r'^myadmin/configHttp/addConfigHttp$', configHttp.addConfigHttp, name="admin_add_configHttp"), url(r'^myadmin/configHttp/getConfigHttpForId$', configHttp.getConfigHttpForId, name="admin_get_configHttp_for_id"), url(r'^myadmin/configHttp/editConfigHttp$', configHttp.editConfigHttp, name="admin_edit_configHttp"), url(r'^myadmin/configHttp/delConfigHttp$', configHttp.delConfigHttp, name="admin_del_configHttp"), url(r'^myadmin/configHttp/resetConfigHttp$', configHttp.resetConfigHttp, name="admin_reset_configHttp"), # httpInterfaceDebug url(r'^myadmin/httpInterfaceDebug/check$', httpInterfaceDebug.httpInterfaceDebugCheckPage, name="admin_httpInterfaceDebug_check"), url(r'^myadmin/httpInterfaceDebug/getHttpInterfaceDebugSubPage$', httpInterfaceDebug.getHttpInterfaceDebug, name="admin_get_httpInterfaceDebug_sub_page"), 
    url(r'^myadmin/httpInterfaceDebug/getAllBusinessLines$', httpInterfaceDebug.getAllBusinessLines, name="admin_get_all_businessLines"),
    url(r'^myadmin/httpInterfaceDebug/getAllModuleNames$', httpInterfaceDebug.getAllModuleNames, name="admin_get_all_moduleNames"),
    url(r'^myadmin/httpInterfaceDebug/getAllSourceNames$', httpInterfaceDebug.getAllSourceNames, name="admin_get_all_sourceNames"),
    url(r'^myadmin/httpInterfaceDebug/getAllHttpConfKeys$', httpInterfaceDebug.getAllHttpConfKeys, name="admin_get_all_httpConfKeys"),
    url(r'^myadmin/httpInterfaceDebug/getAllUsers$', httpInterfaceDebug.getAllUsers, name="admin_get_all_users"),
    url(r'^myadmin/httpInterfaceDebug/addHttpInterfaceDebug$', httpInterfaceDebug.addHttpInterfaceDebug, name="admin_add_httpInterfaceDebug"),
    url(r'^myadmin/httpInterfaceDebug/getHttpInterfaceDebugForId$', httpInterfaceDebug.getHttpInterfaceDebugForId, name="admin_get_httpInterfaceDebug_for_id"),
    url(r'^myadmin/httpInterfaceDebug/editHttpInterfaceDebug$', httpInterfaceDebug.editHttpInterfaceDebug, name="admin_edit_httpInterfaceDebug"),
    url(r'^myadmin/httpInterfaceDebug/delHttpInterfaceDebug$', httpInterfaceDebug.delHttpInterfaceDebug, name="admin_del_httpInterfaceDebug"),
    url(r'^myadmin/httpInterfaceDebug/resetHttpInterfaceDebug$', httpInterfaceDebug.resetHttpInterfaceDebug, name="admin_reset_httpInterfaceDebug"),

    # httpTestcaseDebug
    url(r'^myadmin/httpTestcaseDebug/check$', httpTestcaseDebug.httpTestcaseDebugCheckPage, name="admin_httpTestcaseDebug_check"),
    url(r'^myadmin/httpTestcaseDebug/getHttpTestcaseDebugSubPage$', httpTestcaseDebug.getHttpTestcaseDebug, name="admin_get_httpTestcaseDebug_sub_page"),
    url(r'^myadmin/httpTestcaseDebug/getAllBusinessLines$', httpTestcaseDebug.getAllBusinessLines, name="admin_get_all_businessLines"),
    url(r'^myadmin/httpTestcaseDebug/getAllModuleNames$', httpTestcaseDebug.getAllModuleNames, name="admin_get_all_moduleNames"),
    url(r'^myadmin/httpTestcaseDebug/getAllSourceNames$', httpTestcaseDebug.getAllSourceNames, name="admin_get_all_sourceNames"),
    url(r'^myadmin/httpTestcaseDebug/getAllHttpConfKeys$', httpTestcaseDebug.getAllHttpConfKeys, name="admin_get_all_httpConfKeys"),
    url(r'^myadmin/httpTestcaseDebug/getAllUsers$', httpTestcaseDebug.getAllUsers, name="admin_get_all_users"),
    url(r'^myadmin/httpTestcaseDebug/addHttpTestcaseDebug$', httpTestcaseDebug.addHttpTestcaseDebug, name="admin_add_httpTestcaseDebug"),
    url(r'^myadmin/httpTestcaseDebug/getHttpTestcaseDebugForId$', httpTestcaseDebug.getHttpTestcaseDebugForId, name="admin_get_httpTestcaseDebug_for_id"),
    url(r'^myadmin/httpTestcaseDebug/editHttpTestcaseDebug$', httpTestcaseDebug.editHttpTestcaseDebug, name="admin_edit_httpTestcaseDebug"),
    url(r'^myadmin/httpTestcaseDebug/delHttpTestcaseDebug$', httpTestcaseDebug.delHttpTestcaseDebug, name="admin_del_httpTestcaseDebug"),
    url(r'^myadmin/httpTestcaseDebug/resetHttpTestcaseDebug$', httpTestcaseDebug.resetHttpTestcaseDebug, name="admin_reset_httpTestcaseDebug"),

    # exePython
    url(r'^myadmin/exePython/check$', exePython.exePythonCheckPage, name="admin_exePython_check"),
    url(r'^myadmin/exePython/getExePythonSubPage$', exePython.getExePython, name="admin_get_exePython_sub_page"),
    url(r'^myadmin/exePython/addExePython$', exePython.addExePython, name="admin_add_exePython"),
    url(r'^myadmin/exePython/getExePythonForId$', exePython.getExePythonForId, name="admin_get_exePython_for_id"),
    url(r'^myadmin/exePython/editExePython$', exePython.editExePython, name="admin_edit_exePython"),
    url(r'^myadmin/exePython/delExePython$', exePython.delExePython, name="admin_del_exePython"),
    url(r'^myadmin/exePython/delRedisKey$', exePython.delRedisKey, name="admin_del_redisKey"),
    url(r'^myadmin/exePython/resetExePython$', exePython.resetExePython, name="admin_reset_exePython"),

    # standardTask
    url(r'^myadmin/standardTask/check$', standardTask.standardTaskCheckPage, name="admin_standardTask_check"),
    url(r'^myadmin/standardTask/getStandardTaskSubPage$', standardTask.getStandardTask, name="admin_get_standardTask_sub_page"),
    url(r'^myadmin/standardTask/addStandardTask$', standardTask.addStandardTask, name="admin_add_standardTask"),
    url(r'^myadmin/standardTask/getStandardTaskForId$', standardTask.getStandardTaskForId, name="admin_get_standardTask_for_id"),
    url(r'^myadmin/standardTask/editStandardTask$', standardTask.editStandardTask, name="admin_edit_standardTask"),
    url(r'^myadmin/standardTask/delStandardTask$', standardTask.delStandardTask, name="admin_del_standardTask"),
    url(r'^myadmin/standardTask/resetStandardTask$', standardTask.resetStandardTask, name="admin_reset_standardTask"),
    url(r'^myadmin/standardTask/getAllVersions$', standardTask.getAllVersions, name="admin_get_all_versions"),
    url(r'^myadmin/standardTask/copyTaskToOtherVersion$', standardTask.copyTaskToOtherVersion, name="admin_copy_task"),

    # openApiBusinessLine
    url(r'^myadmin/openApiBusinessLine/check$', openApiBusinessLine.openApiBusinessLineCheckPage, name="admin_openApiBusinessLine_check"),
    url(r'^myadmin/openApiBusinessLine/getOpenApiBusinessLineSubPage$', openApiBusinessLine.getOpenApiBusinessLine, name="admin_get_openApiBusinessLine_sub_page"),
    url(r'^myadmin/openApiBusinessLine/addOpenApiBusinessLine$', openApiBusinessLine.addOpenApiBusinessLine, name="admin_add_openApiBusinessLine"),
    url(r'^myadmin/openApiBusinessLine/getOpenApiBusinessLineForId$', openApiBusinessLine.getOpenApiBusinessLineForId, name="admin_get_openApiBusinessLine_for_id"),
    url(r'^myadmin/openApiBusinessLine/editOpenApiBusinessLine$', openApiBusinessLine.editOpenApiBusinessLine, name="admin_edit_openApiBusinessLine"),
    url(r'^myadmin/openApiBusinessLine/delOpenApiBusinessLine$', openApiBusinessLine.delOpenApiBusinessLine, name="admin_del_openApiBusinessLine"),
    url(r'^myadmin/openApiBusinessLine/resetOpenApiBusinessLine$', openApiBusinessLine.resetOpenApiBusinessLine, name="admin_reset_openApiBusinessLine"),

    # openApiUri
    url(r'^myadmin/openApiUri/check$', openApiUri.openApiUriCheckPage, name="admin_openApiUri_check"),
    url(r'^myadmin/openApiUri/getOpenApiUriSubPage$', openApiUri.getOpenApiUri, name="admin_get_openApiUri_sub_page"),
    url(r'^myadmin/openApiUri/addOpenApiUri$', openApiUri.addOpenApiUri, name="admin_add_openApiUri"),
    url(r'^myadmin/openApiUri/getOpenApiUriForId$', openApiUri.getOpenApiUriForId, name="admin_get_openApiUri_for_id"),
    url(r'^myadmin/openApiUri/editOpenApiUri$', openApiUri.editOpenApiUri, name="admin_edit_openApiUri"),
    url(r'^myadmin/openApiUri/deleteOpenApiUri$', openApiUri.deleteOpenApiUri, name="admin_del_openApiUri"),
    url(r'^myadmin/openApiUri/resetOpenApiUri$', openApiUri.resetOpenApiUri, name="admin_reset_openApiUri"),

    # unitTestService
    url(r'^myadmin/unitTestService/check$', unitTestService.unitTestServiceCheckPage, name="admin_unitTestService_check"),
    url(r'^myadmin/unitTestService/getUnitTestServiceSubPage$', unitTestService.getUnitTestService, name="admin_get_unitTestService_sub_page"),
    url(r'^myadmin/unitTestService/addUnitTestService$', unitTestService.addUnitTestService, name="admin_add_unitTestService"),
    url(r'^myadmin/unitTestService/getUnitTestServiceForId$', unitTestService.getUnitTestServiceForId, name="admin_get_unitTestService_for_id"),
    url(r'^myadmin/unitTestService/editUnitTestService$', unitTestService.editUnitTestService, name="admin_edit_unitTestService"),
    url(r'^myadmin/unitTestService/deleteUnitTestService$', unitTestService.deleteUnitTestService, name="admin_del_unitTestService"),
    url(r'^myadmin/unitTestService/resetUnitTestService$', unitTestService.resetUnitTestService, name="admin_reset_unitTestService"),

    # uiMobileServer
    url(r'^myadmin/uiMobileServer/check$', uiMobileServer.uiMobileServerCheckPage, name="admin_uiMobileServer_check"),
    url(r'^myadmin/uiMobileServer/getUiMobileServerSubPage$', uiMobileServer.getUiMobileServer, name="admin_get_uiMobileServer_sub_page"),
    url(r'^myadmin/uiMobileServer/addUiMobileServer$', uiMobileServer.addUiMobileServer, name="admin_add_uiMobileServer"),
    url(r'^myadmin/uiMobileServer/getUiMobileServerForId$', uiMobileServer.getUiMobileServerForId, name="admin_get_uiMobileServer_for_id"),
    url(r'^myadmin/uiMobileServer/editUiMobileServer$', uiMobileServer.editUiMobileServer, name="admin_edit_uiMobileServer"),
    url(r'^myadmin/uiMobileServer/deleteUiMobileServer$', uiMobileServer.deleteUiMobileServer, name="admin_del_uiMobileServer"),
    url(r'^myadmin/uiMobileServer/resetUiMobileServer$', uiMobileServer.resetUiMobileServer, name="admin_reset_uiMobileServer"),

    # versionManage
    url(r'^myadmin/versionManage/check$', versionManage.versionManageCheckPage, name="admin_versionManage_check"),
    url(r'^myadmin/versionManage/getVersionManageSubPage$', versionManage.getVersionManage, name="admin_get_versionManage_sub_page"),
    url(r'^myadmin/versionManage/addVersionManage$', versionManage.addVersionManage, name="admin_add_versionManage"),
    url(r'^myadmin/versionManage/getVersionManageForId$', versionManage.getVersionManageForId, name="admin_get_versionManage_for_id"),
    url(r'^myadmin/versionManage/editVersionManage$', versionManage.editVersionManage, name="admin_edit_versionManage"),
    url(r'^myadmin/versionManage/deleteVersionManage$', versionManage.deleteVersionManage, name="admin_del_versionManage"),
    url(r'^myadmin/versionManage/resetVersionManage$', versionManage.resetVersionManage, name="admin_reset_versionManage"),

    # userLog
    url(r'^myadmin/userLog/check$', userLog.userLogCheckPage, name="admin_userLog_check"),
    url(r'^myadmin/userLog/getUserLogSubPage$', userLog.getUserLog, name="admin_get_userLog_sub_page"),
    url(r'^myadmin/userLog/addUserLog$', userLog.addUserLog, name="admin_add_userLog"),
    url(r'^myadmin/userLog/getUserLogForId$', userLog.getUserLogForId, name="admin_get_userLog_for_id"),
    url(r'^myadmin/userLog/editUserLog$', userLog.editUserLog, name="admin_edit_userLog"),
    url(r'^myadmin/userLog/deleteUserLog$', userLog.deleteUserLog, name="admin_del_userLog"),
    url(r'^myadmin/userLog/resetUserLog$', userLog.resetUserLog, name="admin_reset_userLog"),

    # standardEnv
    url(r'^myadmin/standardEnv/check$', standardEnv.standardEnvCheckPage, name="admin_standardEnv_check"),
    url(r'^myadmin/standardEnv/getStandardEnvSubPage$', standardEnv.getStandardEnv, name="admin_get_standardEnv_sub_page"),
    url(r'^myadmin/standardEnv/addStandardEnv$', standardEnv.addStandardEnv, name="admin_add_standardEnv"),
    url(r'^myadmin/standardEnv/getStandardEnvForId$', standardEnv.getStandardEnvForId, name="admin_get_standardEnv_for_id"),
    url(r'^myadmin/standardEnv/editStandardEnv$', standardEnv.editStandardEnv, name="admin_edit_standardEnv"),
    url(r'^myadmin/standardEnv/deleteStandardEnv$', standardEnv.deleteStandardEnv, name="admin_del_standardEnv"),
    url(r'^myadmin/standardEnv/resetStandardEnv$', standardEnv.resetStandardEnv, name="admin_reset_standardEnv"),

    # cacheManage
    url(r'^myadmin/cacheManage/check$', cacheManage.cacheManageCheckPage, name="admin_cacheManage_check"),
    url(r'^myadmin/cacheManage/getCacheManageSubPage$', cacheManage.getCacheManage, name="admin_get_cacheManage_sub_page"),
    url(r'^myadmin/cacheManage/deleteCacheData$', cacheManage.deleteCacheData, name="admin_delete_cacheData_sub_page"),
    url(r'^myadmin/cacheManage/flushAllDatas$', cacheManage.flushAllDatas, name="admin_flush_allDatas_sub_page"),
    url(r'^myadmin/cacheManage/addCacheData$', cacheManage.addCacheData, name="admin_add_cacheData"),
    url(r'^myadmin/cacheManage/getCacheValueForCacheKey$', cacheManage.getCacheValueForCacheKey, name="admin_getCacheValue_for_cacheKey"),
    url(r'^myadmin/cacheManage/editCacheData$', cacheManage.editCacheData, name="admin_edit_cacheData"),

    # dataStorage
    url(r'^myadmin/dataStorage/check$', dataStorage.dataStorageCheckPage, name="admin_dataStorage_check"),
    url(r'^myadmin/dataStorage/getCacheManageSubPage$', dataStorage.getdataStorage, name="admin_get_dataStorage_sub_page"),

    # serviceConf
    url(r'^myadmin/serviceConf/check$', adminServiceConf.adminServiceConf, name="admin_service_conf_page"),
    url(r'^myadmin/serviceConf/getAdminServiceConfForId$', adminServiceConf.getAdminServiceConfForId, name="admin_get_service_conf_for_id"),
    url(r'^myadmin/serviceConf/getServiceConfSubPage', adminServiceConf.getAdminServiceConf, name="admin_service_conf_sub_page"),
    url(r'^myadmin/serviceConf/getServiceTaskConfSubPage', adminServiceConf.getAdminServiceTaskConf, name="admin_service_task_conf_sub_page"),
    url(r'^myadmin/serviceConf/saveEditServiceConf', adminServiceConf.editAdminServiceConf, name="admin_edit_service_conf"),
    url(r'^myadmin/serviceConf/queueDeleteTask', adminServiceConf.queueDeleteTask, name="admin_queue_delete_task"),
]
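
# Illustrative sketch (not part of the original file): with named patterns like the ones
# above, views and templates can resolve paths via reverse() instead of hard-coding them.
# The import path below assumes a Django 1.x project, matching the url() syntax used here.
#
#   from django.core.urlresolvers import reverse
#   reverse('admin_exePython_check')   # -> '/myadmin/exePython/check'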
homeassistant/components/tankerkoenig/sensor.py
learn-home-automation/core
22,481
133380
"""Tankerkoenig sensor integration.""" import logging from homeassistant.components.sensor import SensorEntity from homeassistant.const import ( ATTR_ATTRIBUTION, ATTR_LATITUDE, ATTR_LONGITUDE, CURRENCY_EURO, ) from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, DataUpdateCoordinator, UpdateFailed, ) from .const import DOMAIN, NAME _LOGGER = logging.getLogger(__name__) ATTR_BRAND = "brand" ATTR_CITY = "city" ATTR_FUEL_TYPE = "fuel_type" ATTR_HOUSE_NUMBER = "house_number" ATTR_IS_OPEN = "is_open" ATTR_POSTCODE = "postcode" ATTR_STATION_NAME = "station_name" ATTR_STREET = "street" ATTRIBUTION = "Data provided by https://creativecommons.tankerkoenig.de" ICON = "mdi:gas-station" async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the tankerkoenig sensors.""" if discovery_info is None: return tankerkoenig = hass.data[DOMAIN] async def async_update_data(): """Fetch data from API endpoint.""" try: return await tankerkoenig.fetch_data() except LookupError as err: raise UpdateFailed("Failed to fetch data") from err coordinator = DataUpdateCoordinator( hass, _LOGGER, name=NAME, update_method=async_update_data, update_interval=tankerkoenig.update_interval, ) # Fetch initial data so we have data when entities subscribe await coordinator.async_refresh() stations = discovery_info.values() entities = [] for station in stations: for fuel in tankerkoenig.fuel_types: if fuel not in station: _LOGGER.warning( "Station %s does not offer %s fuel", station["id"], fuel ) continue sensor = FuelPriceSensor( fuel, station, coordinator, f"{NAME}_{station['name']}_{fuel}", tankerkoenig.show_on_map, ) entities.append(sensor) _LOGGER.debug("Added sensors %s", entities) async_add_entities(entities) class FuelPriceSensor(CoordinatorEntity, SensorEntity): """Contains prices for fuel in a given station.""" def __init__(self, fuel_type, station, coordinator, name, show_on_map): """Initialize the sensor.""" super().__init__(coordinator) self._station = station self._station_id = station["id"] self._fuel_type = fuel_type self._name = name self._latitude = station["lat"] self._longitude = station["lng"] self._city = station["place"] self._house_number = station["houseNumber"] self._postcode = station["postCode"] self._street = station["street"] self._price = station[fuel_type] self._show_on_map = show_on_map @property def name(self): """Return the name of the sensor.""" return self._name @property def icon(self): """Icon to use in the frontend.""" return ICON @property def native_unit_of_measurement(self): """Return unit of measurement.""" return CURRENCY_EURO @property def native_value(self): """Return the state of the device.""" # key Fuel_type is not available when the fuel station is closed, use "get" instead of "[]" to avoid exceptions return self.coordinator.data[self._station_id].get(self._fuel_type) @property def unique_id(self) -> str: """Return a unique identifier for this entity.""" return f"{self._station_id}_{self._fuel_type}" @property def extra_state_attributes(self): """Return the attributes of the device.""" data = self.coordinator.data[self._station_id] attrs = { ATTR_ATTRIBUTION: ATTRIBUTION, ATTR_BRAND: self._station["brand"], ATTR_FUEL_TYPE: self._fuel_type, ATTR_STATION_NAME: self._station["name"], ATTR_STREET: self._street, ATTR_HOUSE_NUMBER: self._house_number, ATTR_POSTCODE: self._postcode, ATTR_CITY: self._city, } if self._show_on_map: attrs[ATTR_LATITUDE] = self._latitude attrs[ATTR_LONGITUDE] = self._longitude if data is not None and 
"status" in data: attrs[ATTR_IS_OPEN] = data["status"] == "open" return attrs
Chapter1/buylowsellhigh.py
buiksat/Learn-Algorithmic-Trading
449
133397
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas_datareader import data

start_date = '2014-01-01'
end_date = '2018-01-01'
goog_data = data.DataReader('GOOG', 'yahoo', start_date, end_date)

goog_data_signal = pd.DataFrame(index=goog_data.index)
goog_data_signal['price'] = goog_data['Adj Close']
goog_data_signal['daily_difference'] = goog_data_signal['price'].diff()
goog_data_signal['signal'] = 0.0
goog_data_signal['signal'][:] = np.where(goog_data_signal['daily_difference'][:] > 0, 1.0, 0.0)
goog_data_signal['positions'] = goog_data_signal['signal'].diff()

fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Google price in $')
goog_data_signal['price'].plot(ax=ax1, color='r', lw=2.)
ax1.plot(goog_data_signal.loc[goog_data_signal.positions == 1.0].index,
         goog_data_signal.price[goog_data_signal.positions == 1.0],
         '^', markersize=5, color='m')
ax1.plot(goog_data_signal.loc[goog_data_signal.positions == -1.0].index,
         goog_data_signal.price[goog_data_signal.positions == -1.0],
         'v', markersize=5, color='k')
#plt.show()

# Set the initial capital
initial_capital = float(1000.0)

positions = pd.DataFrame(index=goog_data_signal.index).fillna(0.0)
portfolio = pd.DataFrame(index=goog_data_signal.index).fillna(0.0)

positions['GOOG'] = goog_data_signal['signal']
portfolio['positions'] = (positions.multiply(goog_data_signal['price'], axis=0))
portfolio['cash'] = initial_capital - (positions.diff().multiply(goog_data_signal['price'], axis=0)).cumsum()
portfolio['total'] = portfolio['positions'] + portfolio['cash']
portfolio.plot()
plt.show()

fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Portfolio value in $')
portfolio['total'].plot(ax=ax1, lw=2.)
ax1.plot(portfolio.loc[goog_data_signal.positions == 1.0].index,
         portfolio.total[goog_data_signal.positions == 1.0],
         '^', markersize=10, color='m')
ax1.plot(portfolio.loc[goog_data_signal.positions == -1.0].index,
         portfolio.total[goog_data_signal.positions == -1.0],
         'v', markersize=10, color='k')
plt.show()
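
# Illustrative sketch (not part of the original script): the signal/positions logic above,
# applied to a toy price series. A positive one-day difference yields signal 1 (long), and
# positions = signal.diff() marks the entry (+1) and exit (-1) days.
#
#   import numpy as np
#   import pandas as pd
#   s = pd.DataFrame({'price': [10.0, 11.0, 12.0, 11.0]})
#   s['signal'] = np.where(s['price'].diff() > 0, 1.0, 0.0)
#   s['positions'] = s['signal'].diff()   # -> NaN, 1.0, 0.0, -1.0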
starfish/core/spots/FindSpots/test/test_local_max_peak_finder.py
haoxusci/starfish
164
133401
import sys

import numpy as np

from starfish import ImageStack
from starfish.spots import FindSpots
from starfish.types import Axes


def test_lmpf_uniform_peak():
    data_array = np.zeros(shape=(1, 1, 1, 100, 100), dtype=np.float32)
    data_array[0, 0, 0, 45:55, 45:55] = 1
    imagestack = ImageStack.from_numpy(data_array)

    # standard local max peak finder, should find spots for all the evenly illuminated pixels.
    lmpf_no_kwarg = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize)
    peaks = lmpf_no_kwarg.run(imagestack)
    results_no_kwarg = peaks[{Axes.ROUND: 0, Axes.CH: 0}]
    assert len(results_no_kwarg.spot_attrs.data) == 100

    # local max peak finder, capped at one peak per label.
    lmpf_kwarg = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize, num_peaks_per_label=1)
    peaks = lmpf_kwarg.run(imagestack)
    results_kwarg = peaks[{Axes.ROUND: 0, Axes.CH: 0}]
    assert len(results_kwarg.spot_attrs.data) == 1
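
# Illustrative sketch (not part of the original test, and assuming skimage's
# peak_local_max is what backs LocalMaxPeakFinder): num_peaks_per_label caps how many
# maxima a single labelled region may contribute, which is why the uniform 10x10 plateau
# above collapses from 100 spots to 1.
#
#   from skimage.feature import peak_local_max
#   plateau = data_array[0, 0, 0]
#   labels = (plateau > 0).astype(int)
#   peak_local_max(plateau, labels=labels, num_peaks_per_label=1)  # one coordinate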
mitmproxy/tls.py
ianklatzco/mitmproxy
24,939
133429
import io
from dataclasses import dataclass
from typing import List, Optional, Tuple

from kaitaistruct import KaitaiStream
from OpenSSL import SSL

from mitmproxy import connection
from mitmproxy.contrib.kaitaistruct import tls_client_hello
from mitmproxy.net import check
from mitmproxy.proxy import context


class ClientHello:
    """
    A TLS ClientHello is the first message sent by the client when initiating TLS.
    """

    raw_bytes: bytes
    """The raw ClientHello bytes as seen on the wire"""

    def __init__(self, raw_client_hello: bytes):
        """Create a TLS ClientHello object from raw bytes."""
        self.raw_bytes = raw_client_hello
        self._client_hello = tls_client_hello.TlsClientHello(
            KaitaiStream(io.BytesIO(raw_client_hello))
        )

    @property
    def cipher_suites(self) -> List[int]:
        """The cipher suites offered by the client (as raw ints)."""
        return self._client_hello.cipher_suites.cipher_suites

    @property
    def sni(self) -> Optional[str]:
        """
        The [Server Name Indication](https://en.wikipedia.org/wiki/Server_Name_Indication),
        which indicates which hostname the client wants to connect to.
        """
        if self._client_hello.extensions:
            for extension in self._client_hello.extensions.extensions:
                is_valid_sni_extension = (
                    extension.type == 0x00
                    and len(extension.body.server_names) == 1
                    and extension.body.server_names[0].name_type == 0
                    and check.is_valid_host(extension.body.server_names[0].host_name)
                )
                if is_valid_sni_extension:
                    return extension.body.server_names[0].host_name.decode("ascii")
        return None

    @property
    def alpn_protocols(self) -> List[bytes]:
        """
        The application layer protocols offered by the client as part of the
        [ALPN](https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation) TLS extension.
        """
        if self._client_hello.extensions:
            for extension in self._client_hello.extensions.extensions:
                if extension.type == 0x10:
                    return list(x.name for x in extension.body.alpn_protocols)
        return []

    @property
    def extensions(self) -> List[Tuple[int, bytes]]:
        """The raw list of extensions in the form of `(extension_type, raw_bytes)` tuples."""
        ret = []
        if self._client_hello.extensions:
            for extension in self._client_hello.extensions.extensions:
                body = getattr(extension, "_raw_body", extension.body)
                ret.append((extension.type, body))
        return ret

    def __repr__(self):
        return f"ClientHello(sni: {self.sni}, alpn_protocols: {self.alpn_protocols})"


@dataclass
class ClientHelloData:
    """
    Event data for `tls_clienthello` event hooks.
    """

    context: context.Context
    """The context object for this connection."""
    client_hello: ClientHello
    """The entire parsed TLS ClientHello."""
    ignore_connection: bool = False
    """
    If set to `True`, do not intercept this connection and forward encrypted contents unmodified.
    """
    establish_server_tls_first: bool = False
    """
    If set to `True`, pause this handshake and establish TLS with an upstream server first.
    This makes it possible to process the server certificate when generating an interception certificate.
    """


@dataclass
class TlsData:
    """
    Event data for `tls_start_client`, `tls_start_server`, and `tls_handshake` event hooks.
    """

    conn: connection.Connection
    """The affected connection."""
    context: context.Context
    """The context object for this connection."""
    ssl_conn: Optional[SSL.Connection] = None
    """
    The associated pyOpenSSL `SSL.Connection` object.
    This will be set by an addon in the `tls_start_*` event hooks.
    """
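
# Illustrative sketch (not part of the original module): ClientHelloData is what an
# addon receives in the tls_clienthello event hook documented above, so an addon can
# inspect the parsed ClientHello and, e.g., pass traffic for a given SNI through
# unmodified.
#
#   class SkipHost:
#       def tls_clienthello(self, data):  # data: tls.ClientHelloData
#           if data.client_hello.sni == "example.com":
#               data.ignore_connection = True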
web3/utils/empty.py
jsmeng324/web3.py
326
133471
class Empty(object):
    def __bool__(self):
        return False

    def __nonzero__(self):
        return False


empty = Empty()
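
# Illustrative sketch (not part of the original module): `empty` is a falsy sentinel,
# useful as a default value that stays distinguishable from an explicit None.
#
#   def configure(timeout=empty):
#       if timeout is empty:      # caller passed nothing at all
#           timeout = 30
#       return timeout
#
#   configure()        # -> 30
#   configure(None)    # -> None (an explicit choice, preserved)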
src/openue/models/model.py
ikutalilas/OpenUE
461
133498
import transformers as trans
import torch
import pytorch_lightning as pl
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.models.auto.configuration_auto import AutoConfig
from transformers import AutoTokenizer
from openue.data.utils import get_labels_ner, get_labels_seq, OutputExample
from typing import Dict


class BertForRelationClassification(trans.BertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = trans.BertModel(config)
        self.relation_classification = torch.nn.Linear(config.hidden_size, config.num_labels)
        self.loss_fn = torch.nn.BCEWithLogitsLoss()
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        label_ids_seq=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # import pdb; pdb.set_trace()
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        cls_output = sequence_output[:, 0, :]
        relation_output = self.relation_classification(cls_output)
        relation_output_sigmoid = torch.sigmoid(relation_output)

        if label_ids_seq is None:
            return (relation_output_sigmoid, relation_output, cls_output)
        else:
            loss = self.loss_fn(relation_output, label_ids_seq)
            return (loss, relation_output_sigmoid, relation_output, cls_output)

    @staticmethod
    def add_to_argparse(parser):
        parser.add_argument("--model_type", type=str, default="bert")


class BertForNER(trans.BertPreTrainedModel):

    def __init__(self, config, **model_kwargs):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = trans.BertModel(config)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.token_classification = torch.nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        # labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        label_ids_seq=None,
        label_ids_ner=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # batch_size * 107 * hidden_size
        sequence_poolout_output = self.dropout(outputs[0])
        # batch_size * 107 * 6
        logits = self.token_classification(sequence_poolout_output)

        if label_ids_ner is None:
            return logits, outputs[1]

        loss_fct = CrossEntropyLoss()
        # Only keep active parts of the loss
        if attention_mask is not None:
            active_loss = attention_mask.view(-1) == 1
            active_logits = logits.view(-1, self.num_labels)
            active_labels = torch.where(
                active_loss,
                label_ids_ner.view(-1),
                torch.tensor(loss_fct.ignore_index).type_as(label_ids_ner),
            )
            loss = loss_fct(active_logits, active_labels)
        else:
            loss = loss_fct(logits.view(-1, self.num_labels), label_ids_ner.view(-1))

        # if not return_dict:
        output = (logits,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output

    @staticmethod
    def add_to_argparse(parser):
        parser.add_argument("--model_type", type=str, default="bert")


class Inference(pl.LightningModule):
    """
    Input the text, return the extracted triples.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args
        # init the labels
        self._init_labels()
        self._init_models()
        self.mode = "event" if "event" in args.task_name else "triple"
        self.start_idx = self.tokenizer("[relation0]", add_special_tokens=False)['input_ids'][0]
        if self.mode == "event":
            self.process = self.event_process
        else:
            self.process = self.normal_process

    def _init_labels(self):
        self.labels_ner = get_labels_ner()
        self.label_map_ner: Dict[int, str] = {i: label for i, label in enumerate(self.labels_ner)}
        self.num_labels_ner = len(self.labels_ner)

        # load the sequence-classification (relation) labels
        self.labels_seq = get_labels_seq(self.args)
        self.label_map_seq: Dict[int, str] = {i: label for i, label in enumerate(self.labels_seq)}
        self.num_labels_seq = len(self.labels_seq)

    def _init_models(self):
        model_name_or_path = self.args.seq_model_name_or_path
        config = AutoConfig.from_pretrained(
            model_name_or_path,
            num_labels=self.num_labels_seq,
            label2id={label: i for i, label in enumerate(self.labels_seq)},
        )
        self.model_seq = BertForRelationClassification.from_pretrained(
            model_name_or_path,
            config=config,
        )

        model_name_or_path = self.args.ner_model_name_or_path
        # load the NER model to be trained
        config = AutoConfig.from_pretrained(
            model_name_or_path,
            num_labels=self.num_labels_ner,
            id2label=self.label_map_ner,
            label2id={label: i for i, label in enumerate(self.labels_ner)},
        )
        self.model_ner = BertForNER.from_pretrained(
            model_name_or_path,
            config=config,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name_or_path,
            use_fast=False,
        )

    def forward(self, inputs):
        """
        Two possible designs: either run every relation through the model, or use a dynamic
        batch size and only forward the relations that actually occur.

        First, model_seq predicts which relations each sample in the batch contains. The
        relations whose score exceeds the threshold (0.5) are selected, and the matching
        inputs are expanded into a tensor of shape [batch_size * num_relations, seq_length],
        along with the number of relations per sample. After the relation-type embedding is
        added, this is fed into model_ner, which tags every token in input_ids; entity
        extraction then proceeds as standard NER.
        """
        # for k, v in inputs.items():
        #     if isinstance(v, torch.Tensor):
        #         inputs[k] = v.to(self.device)
        inputs_seq = {
            'input_ids': inputs['input_ids'],
            'token_type_ids': inputs['token_type_ids'],
            'attention_mask': inputs['attention_mask'],
        }

        with torch.no_grad():
            outputs_seq = self.model_seq(**inputs_seq)

        batch_size = inputs_seq['input_ids'].shape[0]
        num_relations = len(self.label_map_seq.keys())
        max_length = inputs_seq['input_ids'].shape[1]

        # [batch_size, num_relations]
        relation_output_sigmoid = outputs_seq[0]

        # multi-relation prediction; the 0.5 cutoff is a hyperparameter
        mask_relation_output_sigmoid = relation_output_sigmoid > 0.5

        # if no relation passes the threshold, fall back to the single most probable one
        for i in range(batch_size):
            if torch.sum(mask_relation_output_sigmoid[i]) == 0:
                max_relation_idx = torch.max(relation_output_sigmoid[i], dim=0)[1].item()
                mask_relation_output_sigmoid[i][max_relation_idx] = 1
        mask_relation_output_sigmoid = mask_relation_output_sigmoid.long()

        # mask_output [batch_size * num_relations] marks which expanded inputs are kept
        mask_output = mask_relation_output_sigmoid.view(-1)

        # the relation gets a special suffix appended:
        # input_ids: [SEP relation], attention_mask: [1 1], token_type_ids: [1 1]
        # relation_index shape: [batch_size, num_relations]
        relation_index = torch.arange(self.start_idx, self.start_idx + num_relations).to(self.device).expand(batch_size, num_relations)
        # piece 1 to concatenate: the REL token ids of the selected relations
        relation_ids = torch.masked_select(relation_index, mask_relation_output_sigmoid.bool())
        # piece 2 to concatenate: SEP
        cat_sep = torch.full((relation_ids.shape[0], 1), 102).long().to(self.device)
        # piece 3 to concatenate: [1]
        cat_one = torch.full((relation_ids.shape[0], 1), 1).long().to(self.device)
        # piece 4 to concatenate: [0]
        cat_zero = torch.full((relation_ids.shape[0], 1), 0).long().to(self.device)

        # expand the original input_ids along the relation dimension
        input_ids_ner = torch.unsqueeze(inputs['input_ids'], 1)  # [batch_size, 1, seq_length]
        # [batch_size, num_relations, max_length], one copy per relation
        input_ids_ner = input_ids_ner.expand(-1, len(self.label_map_seq.keys()), -1)
        # [batch_size * num_relations, max_length]
        input_ids_ner_reshape = input_ids_ner.reshape(batch_size * num_relations, max_length)

        # select all predicted relations
        mask = mask_output.unsqueeze(dim=1).expand(-1, max_length)  # [batch_size * num_relations, max_length]
        # keep only the selected input_ids
        input_ids = torch.masked_select(input_ids_ner_reshape, mask.bool()).view(-1, max_length)
        # n (number of selected relations) * max_length; n >> batch_size,
        # because one sentence can contain several relations

        # append the extra slots needed for the SEP and relation tokens
        input_ids = torch.cat((input_ids, cat_zero), 1)
        input_ids_ner = torch.cat((input_ids, cat_zero), 1)

        # the sum of ones in the attention mask gives the rel_pos position
        attention_mask_ner = torch.unsqueeze(inputs['attention_mask'], 1)
        # [batch_size, num_relations, max_length], one copy per relation
        attention_mask_ner = attention_mask_ner.expand(-1, len(self.label_map_seq.keys()), -1)
        # [batch_size * num_relations, max_length]
        attention_mask_ner_reshape = attention_mask_ner.reshape(batch_size * num_relations, max_length)

        # select all predicted relations
        tmp1 = mask_output.unsqueeze(dim=1)  # [200, 1]
        mask = tmp1.expand(-1, max_length)  # [200, 79]
        tmp2 = torch.masked_select(attention_mask_ner_reshape, mask.bool())
        # n (number of selected relations) * max_length; n >> batch_size,
        # because one sentence can contain several relations
        tmp3 = tmp2.view(-1, max_length)

        # the sum of ones in the attention mask gives the rel_pos position
        rel_pos = torch.sum(tmp3, dim=1)
        (rel_number_find, max_length_find) = input_ids_ner.shape
        one_hot = torch.sparse.torch.eye(max_length_find).long().to(self.device)
        rel_pos_mask = one_hot.index_select(0, rel_pos)
        rel_pos_mask_plus = one_hot.index_select(0, rel_pos + 1)

        # build the input_ids input
        input_ids_ner[rel_pos_mask.bool()] = relation_ids
        input_ids_ner[rel_pos_mask_plus.bool()] = cat_sep.squeeze()

        # build the token_type_ids input
        token_type_ids_ner = torch.zeros(rel_number_find, max_length_find).to(self.device)
        token_type_ids_ner[rel_pos_mask.bool()] = 1
        token_type_ids_ner[rel_pos_mask_plus.bool()] = 1
        token_type_ids_ner = token_type_ids_ner.long()

        # build the attention_mask input
        # append 0
        tmp4 = torch.cat((tmp3, cat_zero), dim=1)
        # append 0
        tmp5 = torch.cat((tmp4, cat_zero), dim=1)
        tmp5[rel_pos_mask.bool()] = 1
        tmp5[rel_pos_mask_plus.bool()] = 1
        attention_mask_ner_tmp = tmp5

        inputs_ner = {
            'input_ids': input_ids_ner,
            'token_type_ids': token_type_ids_ner,
            'attention_mask': attention_mask_ner_tmp,
        }

        outputs_ner = self.model_ner(**inputs_ner)[0]

        _, results = torch.max(outputs_ner, dim=2)
        results = results.cpu().tolist()
        results = [[self.label_map_ner[__] for __ in _] for _ in results]

        attention_position_np = rel_pos.cpu().numpy()
        attention_position_list = attention_position_np.tolist()
        predict_relation_list = relation_ids.long().tolist()
        input_ids_list = input_ids_ner.cpu().tolist()

        output = []
        input_ids = []
        for idx, result in enumerate(results):
            tmp1 = result[0: attention_position_list[idx] - 1]
            tmp2 = input_ids_list[idx][0: attention_position_list[idx] - 1]
            output.append(tmp1)
            input_ids.append(tmp2)

        input_split = torch.sum(mask_relation_output_sigmoid, dim=1)
        for i in range(1, batch_size):
            input_split[i] += input_split[i - 1]

        tmp_input_ids = [input_ids[:input_split[0]]]
        tmp_output = [output[:input_split[0]]]
        for i in range(1, batch_size):
            tmp_input_ids.append(input_ids[input_split[i - 1]:input_split[i]])
            tmp_output.append(output[input_split[i - 1]:input_split[i]])
        output = tmp_output
        input_ids = tmp_input_ids

        # convert the NER output to BIOES tags, then pull the entities out
        # processed_results_list_BIO = []
        # for result in processed_results_list:
        #     processed_results_list_BIO.append([self.label_map_ner[token] for token in result])

        # unpack the results
        index = 0
        triple_output = [[] for _ in range(batch_size)]
        # for each relation type or event type
        # by default, extract the first head and tail to construct the triples
        if self.mode == "triple":
            cnt = 0
            for ids_list, BIOS_list in zip(input_ids, output):
                for ids, BIOS in zip(ids_list, BIOS_list):
                    labels = self.process(ids, BIOS)
                    # r = label_map_seq[predict_relation_list[index]]
                    r = predict_relation_list[index] - self.start_idx
                    if len(labels['subject']) == 0:
                        h = None
                    else:
                        h = labels['subject']
                        # h = ''.join(tokenizer.convert_ids_to_tokens(h))
                    if len(labels['object']) == 0:
                        t = None
                    else:
                        t = labels['object']
                        # t = ''.join(tokenizer.convert_ids_to_tokens(t))
                    # greedily pair every extracted head with every extracted tail
                    if h and t:
                        for hh in h:
                            for tt in t:
                                triple_output[cnt].append([hh, r, tt])
                    index = index + 1
                cnt += 1
        # not handled for now
        # elif self.mode == "event":
        #     for ids, BIOS in zip(processed_input_ids_list, processed_results_list_BIO):
        #         triple_output.append(dict(event_type=predict_relation_list[index], argument=self.process(ids, BIOS)))

        return triple_output

    @staticmethod
    def normal_process(text, result):
        index = 0
        start = None
        labels = {}
        labels['subject'] = []
        labels['object'] = []
        indicator = ''
        for w, t in zip(text, result):
            # ["O", "B-SUB", "I-SUB", "B-OBJ", "I-OBJ", "Relation"]
            if start is None:
                if t == 'B-SUB':
                    start = index
                    indicator = 'subject'
                elif t == 'B-OBJ':
                    start = index
                    indicator = 'object'
            else:
                # if t == 'I-SUB' or t == 'I-OBJ':
                #     continue
                if t == "O":
                    # print(result[start: index])
                    labels[indicator].append(text[start: index])
                    start = None
            index += 1
        # print(labels)
        return labels

    @staticmethod
    def event_process(text, result):
        """
        return List[Dict(text, label)]
        """
        index = 0
        start = None
        labels = []
        indicator = ''
        for w, t in zip(text, result):
            # ["O", "B-SUB", "I-SUB", "B-OBJ", "I-OBJ", "Relation"]
            if start is None:
                if "B-" in t:
                    # get the label name
                    indicator = t.split("-")[-1]
                    start = index
            else:
                if t == "O":
                    # print(result[start: index])
                    labels.append(dict(text=text[start: index], label=indicator))
                    start = None
                elif t.split("-")[-1] != indicator or "B-" in t:
                    # B-a I-b wrong, B-a B-a wrong
                    start = None
            index += 1
        # print(labels)
        return labels

    @staticmethod
    def add_to_argparse(parser):
        parser.add_argument("--seq_model_name_or_path", type=str, default="seq_model")
        parser.add_argument("--ner_model_name_or_path", type=str, default="ner_model")
        return parser
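
# Illustrative sketch (not part of the original module): how normal_process decodes a
# BIO tag sequence into subject/object spans. Tokens and tags below are toy data; note
# that a span is only closed once an "O" tag follows it.
#
#   tokens = ["Alice", "works", "at", "Acme", "Corp", "."]
#   tags = ["B-SUB", "O", "O", "B-OBJ", "I-OBJ", "O"]
#   Inference.normal_process(tokens, tags)
#   # -> {'subject': [['Alice']], 'object': [['Acme', 'Corp']]}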
tests/test_find_enrichment_allfmts.py
flying-sheep/goatools
477
133499
#!/usr/bin/env python3
"""Test running an enrichment using any annotation file format."""

from __future__ import print_function

__copyright__ = "Copyright (C) 2010-2019, <NAME>, <NAME>. All rights reserved."

import os
import itertools

from goatools.base import get_godag
from goatools.associations import dnld_annofile
from goatools.anno.factory import get_objanno
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.goea.go_enrichment_ns import GOEnrichmentStudyNS

REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")


def test_find_enrichment():
    """Run an enrichment using all annotation file formats."""
    godag = get_godag("go-basic.obo", optional_attrs=['relationship'])
    gos = _get_enriched_goids('GO:0006959', godag)  # GO IDs related to humoral response

    # pylint: disable=superfluous-parens
    print('- DOWNLOAD AND LOAD -----------------------------------------------')
    annoobjs = [
        _get_objanno('gene2go', taxid=10090),
        _get_objanno('gene2go', taxid=9606),
        _get_objanno('goa_human.gaf'),
        _get_objanno('goa_human.gpad', godag=godag),
        _get_objanno('data/association', anno_type='id2gos', godag=godag),
    ]

    for obj in annoobjs:
        ns2assc = obj.get_ns2assc()
        pop = list(itertools.chain.from_iterable(ns2assc.values()))
        print('{N:6,} population IDs'.format(N=len(pop)))
        enriched = set(nt.DB_ID for nt in obj.associations if nt.GO_ID in gos)
        objgoeans = _get_objgoeans(pop, ns2assc, godag)
        results = objgoeans.run_study(enriched)
        print('{N} results'.format(N=len(results)))
        # Run one branch
        bp2assc = {'BP': ns2assc['BP']}
        objgoeabp = _get_objgoeans(pop, bp2assc, godag)
        results_bp = objgoeabp.run_study(enriched)
        print('{N} results'.format(N=len(results_bp)))
    print("TEST PASSED")


def _get_objgoeans(pop, ns2assoc, godag):
    """Run gene ontology enrichment analysis (GOEA)."""
    return GOEnrichmentStudyNS(pop, ns2assoc, godag,
                               propagate_counts=True,
                               relationships=False,
                               alpha=0.05,
                               methods={'fdr_bh'})


def _get_enriched_goids(top, godag):
    """Get the set of GO IDs related to the specified top term."""
    gosubdag = GoSubDag(None, godag, relationships=True)
    return {go for go, s in gosubdag.rcntobj.go2descendants.items() if top in s or top == go}


def _get_objanno(fin_anno, anno_type=None, **kws):
    """Get an association object."""
    full_anno = os.path.join(REPO, fin_anno)
    dnld_annofile(full_anno, anno_type)
    obj = get_objanno(full_anno, anno_type, **kws)
    return obj


if __name__ == '__main__':
    test_find_enrichment()

# Copyright (C) 2010-2019, <NAME>, <NAME>. All rights reserved.
retrieval.py
janghyuncho/PiCIE
113
133519
import os
import sys
import argparse
import logging
import time as t

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from modules import fpn
from commons import *
from utils import *
from train_picie import *


def initialize_classifier(args, n_query, centroids):
    classifier = nn.Conv2d(args.in_dim, n_query, kernel_size=1, stride=1, padding=0, bias=False)
    classifier = nn.DataParallel(classifier)
    classifier = classifier.cuda()
    if centroids is not None:
        classifier.module.weight.data = centroids.unsqueeze(-1).unsqueeze(-1)
    freeze_all(classifier)
    return classifier


def get_testloader(args):
    testset = EvalDataset(args.data_root, dataset=args.dataset, res=args.res1,
                          split=args.val_type, mode='test', stuff=args.stuff, thing=args.thing)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size_eval,
                                             shuffle=False,
                                             num_workers=args.num_workers,
                                             pin_memory=True,
                                             collate_fn=collate_eval)
    return testloader


def compute_dist(featmap, metric_function, euclidean_train=True):
    centroids = metric_function.module.weight.data
    if euclidean_train:
        return - (1 - 2 * metric_function(featmap)
                  + (centroids * centroids).sum(dim=1).unsqueeze(0))  # negative l2 squared
    else:
        return metric_function(featmap)


def get_nearest_neighbors(n_query, dataloader, model, classifier, k=10):
    model.eval()
    classifier.eval()

    min_dsts = [[] for _ in range(n_query)]
    min_locs = [[] for _ in range(n_query)]
    min_imgs = [[] for _ in range(n_query)]
    with torch.no_grad():
        for indice, image, label in dataloader:
            image = image.cuda(non_blocking=True)
            feats = model(image)
            feats = F.normalize(feats, dim=1, p=2)

            dists = compute_dist(feats, classifier)  # (B x C x H x W)
            B, _, H, W = dists.shape
            for c in range(n_query):
                dst, idx = dists[:, c].flatten().topk(1)
                idx = idx.item()
                ib = idx // (H * W)
                ih = idx % (H * W) // W
                iw = idx % (H * W) % W
                if len(min_dsts[c]) < k:
                    min_dsts[c].append(dst)
                    min_locs[c].append((ib, ih, iw))
                    min_imgs[c].append(indice[ib])
                elif dst < max(min_dsts[c]):
                    imax = np.argmax(min_dsts[c])
                    min_dsts[c] = min_dsts[c][:imax] + min_dsts[c][imax + 1:]
                    min_locs[c] = min_locs[c][:imax] + min_locs[c][imax + 1:]
                    min_imgs[c] = min_imgs[c][:imax] + min_imgs[c][imax + 1:]
                    min_dsts[c].append(dst)
                    min_locs[c].append((ib, ih, iw))
                    min_imgs[c].append(indice[ib])

    loclist = min_locs
    dataset = dataloader.dataset
    imglist = [[dataset.transform_data(*dataset.load_data(dataset.imdb[i]), i, raw_image=True)
                for i in ids] for ids in min_imgs]
    return imglist, loclist


if __name__ == '__main__':
    args = parse_arguments()

    # Use random seed.
    fix_seed_for_reproducability(args.seed)

    # Init model.
    model = fpn.PanopticFPN(args)
    model = nn.DataParallel(model)
    model = model.cuda()

    # Load weights.
    checkpoint = torch.load(args.eval_path)
    model.load_state_dict(checkpoint['state_dict'])

    # Init classifier (for eval only.)
    queries = torch.tensor(np.load('querys.npy')).cuda()
    classifier = initialize_classifier(args, queries.size(0), queries)

    # Prepare dataloader.
    dataset = get_dataset(args, mode='eval_test')
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size_test,
                                             shuffle=False,
                                             num_workers=args.num_workers,
                                             pin_memory=True,
                                             collate_fn=collate_eval,
                                             worker_init_fn=worker_init_fn(args.seed))

    # Retrieve 10-nearest neighbors.
    imglist, loclist = get_nearest_neighbors(queries.size(0), dataloader, model, classifier, k=args.K_test)

    # Save the result.
    torch.save([imglist, loclist], args.save_root + '/picie_retrieval_result_coco.pkl')
    print('-Done.', flush=True)
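
# Illustrative sketch (not part of the original script): with L2-normalized features
# (so ||x||^2 == 1), the 1x1-conv expression in compute_dist equals the negative squared
# Euclidean distance -||x - c||^2 to each centroid. Toy shapes below are arbitrary.
#
#   feats = F.normalize(torch.randn(2, 8, 4, 4), dim=1, p=2)
#   cents = torch.randn(5, 8)
#   conv = nn.Conv2d(8, 5, kernel_size=1, bias=False)
#   conv.weight.data = cents.unsqueeze(-1).unsqueeze(-1)
#   fast = -(1 - 2 * conv(feats) + (cents * cents).sum(1)[None, :, None, None])
#   slow = -torch.cdist(feats.permute(0, 2, 3, 1).reshape(-1, 8), cents).pow(2)
#   assert torch.allclose(fast.permute(0, 2, 3, 1).reshape(-1, 5), slow, atol=1e-4)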
mimesis/data/int/text.py
chinghwayu/mimesis
2,619
133530
# -*- coding: utf-8 -*-

"""Provides all the data related to text."""

SAFE_COLORS = [
    "#1abc9c",
    "#16a085",
    "#2ecc71",
    "#27ae60",
    "#3498db",
    "#2980b9",
    "#9b59b6",
    "#8e44ad",
    "#34495e",
    "#2c3e50",
    "#f1c40f",
    "#f39c12",
    "#e67e22",
    "#d35400",
    "#e74c3c",
    "#c0392b",
    "#ecf0f1",
    "#bdc3c7",
    "#95a5a6",
    "#7f8c8d",
]
vmoe/nn/external.py
google-research/vmoe
205
133535
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""External models wrapped to work with the V-MoE codebase."""
import flax.linen as nn
import ml_collections
from vit_jax import models_mixer
from vit_jax import models_vit


class MlpMixer(models_mixer.MlpMixer):
  """Official implementation of the MLP-Mixer."""
  deterministic: bool = False

  def __post_init__(self):
    # Note: The base class assumes that patches is a ConfigDict.
    self.patches = ml_collections.ConfigDict(self.patches)
    super().__post_init__()

  @nn.compact
  def __call__(self, inputs):
    return super().__call__(inputs, train=not self.deterministic), {}


class VisionTransformer(models_vit.VisionTransformer):
  """Official implementation of the Vision Transformer."""
  deterministic: bool = False

  def __post_init__(self):
    # Note: The base class assumes that patches and resnet are ConfigDicts.
    self.patches = ml_collections.ConfigDict(self.patches)
    if self.resnet is not None:
      self.resnet = ml_collections.ConfigDict(self.resnet)
    super().__post_init__()

  @nn.compact
  def __call__(self, inputs):
    return super().__call__(inputs, train=not self.deterministic), {}
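
# Illustrative sketch (not part of the original module; the field names below follow the
# vit_jax MlpMixer definition and are assumptions here): thanks to __post_init__, the
# wrapper accepts a plain dict for `patches`, and __call__ returns the (output, metrics)
# pair the V-MoE codebase expects, with metrics fixed to {}.
#
#   model = MlpMixer(num_classes=10, patches={'size': (16, 16)}, num_blocks=8,
#                    hidden_dim=512, tokens_mlp_dim=256, channels_mlp_dim=2048,
#                    deterministic=True)
#   logits, metrics = model.apply(params, images)   # metrics is just {}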
vibora/router/parser.py
brettcannon/vibora
6,238
133576
import re

from ..exceptions import RouteConfigurationError


class PatternParser:

    PARAM_REGEX = re.compile(b'<.*?>')
    DYNAMIC_CHARS = bytearray(b'*?.[]()')
    CAST = {
        str: lambda x: x.decode('utf-8'),
        int: lambda x: int(x),
        float: lambda x: float(x)
    }

    @classmethod
    def validate_param_name(cls, name: bytes):
        # TODO:
        if b':' in name:
            raise RouteConfigurationError('Special characters are not allowed in param name. '
                                          'Use type hints in function parameters to cast the variable '
                                          'or regexes with named groups to ensure only a specific URL matches.')

    @classmethod
    def extract_params(cls, pattern: bytes) -> tuple:
        """
        :param pattern:
        :return:
        """
        params = []
        new_pattern = pattern
        simplified_pattern = pattern
        groups = cls.PARAM_REGEX.findall(pattern)
        for group in groups:
            name = group[1:-1]  # Removing <> chars
            cls.validate_param_name(name)
            simplified_pattern = simplified_pattern.replace(group, b'$' + name)
            params.append(name.decode())
            new_pattern = new_pattern.replace(group, b'(?P<' + name + b'>[^/]+)')
        return re.compile(new_pattern), params, simplified_pattern

    @classmethod
    def is_dynamic_pattern(cls, pattern: bytes) -> bool:
        for index, char in enumerate(pattern):
            if char in cls.DYNAMIC_CHARS:
                if index > 0 and pattern[index - 1] == '\\':
                    continue
                return True
        return False
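
# Illustrative sketch (not part of the original module): what extract_params produces
# for a route with one dynamic segment.
#
#   regex, params, simplified = PatternParser.extract_params(b'/user/<id>')
#   params                                  # -> ['id']
#   simplified                              # -> b'/user/$id'
#   regex.match(b'/user/42').group('id')    # -> b'42'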
FWCore/Framework/test/test_deepCall_unscheduled_fail_cfg.py
ckamtsikis/cmssw
852
133582
import FWCore.ParameterSet.Config as cms

process = cms.Process("TEST")

import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
    Rethrow = FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(3)
)

process.source = cms.Source("EmptySource",
    timeBetweenEvents = cms.untracked.uint64(10),
    firstTime = cms.untracked.uint64(1000000)
)

process.Tracer = cms.Service("Tracer")

process.result1 = cms.EDProducer("AddIntsProducer",
    labels = cms.VInputTag('one')
)

process.result2 = cms.EDProducer("AddIntsProducer",
    labels = cms.VInputTag('result1', 'one')
)

process.result4 = cms.EDProducer("AddIntsProducer",
    labels = cms.VInputTag('result2', 'result2')
)

process.get = cms.EDAnalyzer("IntTestAnalyzer",
    valueMustMatch = cms.untracked.int32(4),
    moduleLabel = cms.untracked.InputTag('result4')
)

process.t = cms.Task(process.result1, process.result2, process.result4)
process.p = cms.Path(process.get, process.t)
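
# Note (added; an inference from the file name test_deepCall_unscheduled_fail_cfg.py):
# the 'one' producer consumed by result1 and result2 is never defined in this config or
# added to the Task, so resolving result4 -> result2 -> result1 -> 'one' on demand is
# expected to fail, which is what this configuration tests.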
examples/prof.py
RanganThaya/cvxpylayers
1,287
133586
#!/usr/bin/env python3

import argparse
import sys
import itertools
import time

import numpy as np
import numpy.random as npr

import torch
from qpth.qp import QPFunction
from cvxpylayers.torch.cvxpylayer import CvxpyLayer

from scipy.linalg import sqrtm
from scipy import sparse

import cvxpy as cp
import pandas as pd

from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(
    mode='Verbose', color_scheme='Linux', call_pdb=1)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--nTrials', type=int, default=10)
    args = parser.parse_args()

    npr.seed(0)

    prof(args)


def prof(args):
    trials = []

    for nz, nbatch, cuda in itertools.product(
            [128], [128], [True, False]):
        print('--- {} vars/cons, batch size: {}, cuda: {} ---'.format(
            nz, nbatch, cuda))
        for i in range(args.nTrials):
            print(' + Trial {}'.format(i))
            t = prof_dense_qp(i, nz, nbatch, 'dense', cuda)
            trials += t
            print(t)

    for nz, nbatch, cuda in itertools.product(
            [1024], [32], [False]):
        print('--- {} vars/cons, batch size: {}, cuda: {} ---'.format(
            nz, nbatch, cuda))
        for i in range(args.nTrials):
            print(' + Trial {}'.format(i))
            t = prof_sparse_qp(i, nz, nbatch, None, cuda)
            trials += t
            print(t)

    df = pd.DataFrame(trials)
    df.to_csv('results.csv', index=False)


def prof_sparse_qp(trial, nz, nbatch, cons, cuda=True):
    trials = []

    npr.seed(trial)

    A = sparse.random(nz, nz, density=.01) + sparse.eye(nz)
    A_rows, A_cols = A.nonzero()
    G = sparse.random(nz, nz, density=.01) + sparse.eye(nz)
    G_rows, G_cols = G.nonzero()
    Q = sparse.eye(nz)

    xs = npr.randn(nbatch, nz)
    p = npr.randn(nbatch, nz)
    b = np.array([A @ xs[i] for i in range(nbatch)])
    h = np.array([G @ xs[i] for i in range(nbatch)])

    def convert(A):
        A = [A.todense() for _ in range(nbatch)]
        return torch.from_numpy(np.array(A)).double().requires_grad_()

    Q_tch, A_tch, G_tch = [convert(mat) for mat in [Q, A, G]]
    p_tch, b_tch, h_tch = [
        torch.from_numpy(x).double().requires_grad_() for x in [p, b, h]
    ]

    if cuda:
        p_tch, Q_tch, G_tch, h_tch, A_tch, b_tch = [
            x.cuda() for x in [p_tch, Q_tch, G_tch, h_tch, A_tch, b_tch]]

    torch.cuda.synchronize()
    torch.cuda.synchronize()
    start = time.time()
    x = QPFunction(verbose=False, eps=1e-8, notImprovedLim=5,
                   maxIter=1000)(Q_tch, p_tch, G_tch, h_tch, A_tch, b_tch)
    torch.cuda.synchronize()
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'qpth', 'direction': 'forward', 'time': t, 'qp': 'sparse'
    })

    y = x.sum()
    start = time.time()
    y.backward()
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'qpth', 'direction': 'backward', 'time': t, 'qp': 'sparse'
    })

    _p = cp.Parameter((nz, 1))
    _b = cp.Parameter((nz, 1))
    _h = cp.Parameter((nz, 1))
    _z = cp.Variable((nz, 1))

    obj = cp.Minimize(0.5 * cp.sum_squares(_z) + _p.T @ _z)
    cons = [G @ _z <= _h, A @ _z == _b]
    prob = cp.Problem(obj, cons)

    p_tch, b_tch, h_tch = [torch.from_numpy(x).unsqueeze(-1).requires_grad_()
                           for x in [p, b, h]]

    solver_args = {
        'mode': 'lsqr',
        'verbose': False,
        'max_iters': 1000,
        'eps': 1e-6,
        'use_indirect': False,
        'gpu': False,
        'n_jobs_forward': -1,
        'n_jobs_backward': -1
    }

    solve = CvxpyLayer(prob, [_p, _b, _h], [_z])

    start = time.time()
    z, = solve(p_tch, b_tch, h_tch, solver_args=solver_args)
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'cvxpylayers', 'direction': 'forward', 'time': t, 'qp': 'sparse',
        'canon_time': solve.info.get("canon_time")
    })

    y = z.sum()
    start = time.time()
    y.backward()
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'cvxpylayers', 'direction': 'backward', 'time': t, 'qp': 'sparse',
        'dcanon_time': solve.info.get("dcanon_time")
    })

    return trials


def prof_dense_qp(trial, nz, nbatch, cons, cuda=True):
    trials = []

    npr.seed(trial)

    L = npr.rand(nbatch, nz, nz)
    Q = np.matmul(L, L.transpose((0, 2, 1))) + 1e-3 * np.eye(nz, nz)
    p = npr.randn(nbatch, nz)

    if cons == 'dense':
        nineq = nz
        G = npr.randn(nbatch, nineq, nz)
        z0 = npr.randn(nbatch, nz)
        s0 = npr.rand(nbatch, nineq)
        h = np.matmul(G, np.expand_dims(z0, axis=(2))).squeeze(2) + s0
    elif cons == 'box':
        nineq = 2 * nz
        G = np.concatenate((-np.eye(nz), np.eye(nz)))
        G = np.stack([G] * nbatch)
        h = np.ones((nbatch, 2 * nz))
    else:
        raise NotImplementedError

    p_tch, Q_tch, G_tch, h_tch = [
        torch.from_numpy(x).double().requires_grad_() for x in [p, Q, G, h]
    ]
    if cuda:
        p_tch, Q_tch, G_tch, h_tch = [x.cuda() for x in [p_tch, Q_tch, G_tch, h_tch]]

    e = torch.Tensor()
    torch.cuda.synchronize()
    torch.cuda.synchronize()
    start = time.time()
    x = QPFunction(verbose=False, eps=1e-8, notImprovedLim=5,
                   maxIter=1000)(Q_tch, p_tch, G_tch, h_tch, e, e)
    torch.cuda.synchronize()
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'qpth', 'direction': 'forward', 'time': t, 'qp': 'dense'
    })

    y = x.sum()
    start = time.time()
    y.backward()
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'qpth', 'direction': 'backward', 'time': t, 'qp': 'dense'
    })

    _Q_sqrt = cp.Parameter((nz, nz))
    _p = cp.Parameter((nz, 1))
    _G = cp.Parameter((nineq, nz))
    _h = cp.Parameter((nineq, 1))
    _z = cp.Variable((nz, 1))
    obj = cp.Minimize(0.5 * cp.sum_squares(_Q_sqrt @ _z) + _p.T @ _z)
    cons = [_G @ _z <= _h]
    prob = cp.Problem(obj, cons)

    Q_sqrt = np.array([sqrtm(q) for q in Q])
    Q_sqrt_tch, p_tch, G_tch, h_tch = [
        torch.from_numpy(x).double().requires_grad_() for x in [Q_sqrt, p, G, h]]

    solver_args = {
        'mode': 'dense',
        'verbose': False,
        'max_iters': 1000,
        'eps': 1e-6,
        'use_indirect': False,
        'gpu': False,
        'n_jobs_forward': 12,
        'n_jobs_backward': 12
    }

    solve = CvxpyLayer(prob, [_Q_sqrt, _p, _G, _h], [_z])
    start = time.time()
    z, = solve(
        Q_sqrt_tch, p_tch.unsqueeze(-1), G_tch, h_tch.unsqueeze(-1),
        solver_args=solver_args
    )
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'cvxpylayers', 'direction': 'forward', 'time': t, 'qp': 'dense',
        'canon_time': solve.info.get("canon_time")
    })

    y = z.sum()
    start = time.time()
    y.backward()
    t = time.time() - start
    trials.append({
        'trial': trial, 'nz': nz, 'nbatch': nbatch, 'cuda': cuda,
        'mode': 'cvxpylayers', 'direction': 'backward', 'time': t, 'qp': 'dense',
        'dcanon_time': solve.info.get("dcanon_time")
    })

    return trials


if __name__ == '__main__':
    main()
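
# Illustrative usage note (not part of the original script): running e.g.
# `./prof.py --nTrials 3` profiles qpth against cvxpylayers on the dense and sparse QPs
# above and writes one timing row per (trial, mode, direction) to results.csv.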
pocsuite/pocsuite_attack.py
zx273983653/vulscan
582
133590
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Copyright (c) 2014-2016 pocsuite developers (https://seebug.org)
See the file 'docs/COPYING' for copying permission
"""

import sys

from pocsuite_cli import pcsInit
from .lib.core.common import banner
from .lib.core.common import dataToStdout
from .lib.core.settings import PCS_OPTIONS


def main():
    try:
        pocFile, targetUrl = sys.argv[1: 3]
    except ValueError:
        excMsg = "usage: pcs-attack [pocfile] [url]\n"
        excMsg += "pocsuite: error: too few arguments"
        dataToStdout(excMsg)
        sys.exit(1)

    PCS_OPTIONS.update(
        {
            'url': targetUrl,
            'pocFile': pocFile,
            'headers': None,
            'extra_params': None,
            'host': None,
            'Mode': 'attack',
            'retry': None,
            'delay': None,
            'dork': None,
            'vulKeyword': None,
        }
    )
    pcsInit(PCS_OPTIONS)


if __name__ == "__main__":
    main()
website/debug_toolbar/views.py
bopopescu/canvas
121
133618
""" Helper views for the debug toolbar. These are dynamically installed when the debug toolbar is displayed, and typically can do Bad Things, so hooking up these views in any other way is generally not advised. """ import os import django.views.static from django.conf import settings from django.db import connection from django.http import HttpResponseBadRequest from django.shortcuts import render_to_response from django.utils import simplejson from django.utils.hashcompat import sha_constructor class InvalidSQLError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) def debug_media(request, path): root = getattr(settings, 'DEBUG_TOOLBAR_MEDIA_ROOT', None) if root is None: parent = os.path.abspath(os.path.dirname(__file__)) root = os.path.join(parent, 'media', 'debug_toolbar') return django.views.static.serve(request, path, root) def sql_select(request): """ Returns the output of the SQL SELECT statement. Expected GET variables: sql: urlencoded sql with positional arguments params: JSON encoded parameter values duration: time for SQL to execute passed in from toolbar just for redisplay hash: the hash of (secret + sql + params) for tamper checking """ from debug_toolbar.panels.sql import reformat_sql sql = request.GET.get('sql', '') params = request.GET.get('params', '') hash = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest() if hash != request.GET.get('hash', ''): return HttpResponseBadRequest('Tamper alert') # SQL Tampering alert if sql.lower().strip().startswith('select'): params = simplejson.loads(params) cursor = connection.cursor() cursor.execute(sql, params) headers = [d[0] for d in cursor.description] result = cursor.fetchall() cursor.close() context = { 'result': result, 'sql': reformat_sql(cursor.db.ops.last_executed_query(cursor, sql, params)), 'duration': request.GET.get('duration', 0.0), 'headers': headers, } return render_to_response('debug_toolbar/panels/sql_select.html', context) raise InvalidSQLError("Only 'select' queries are allowed.") def sql_explain(request): """ Returns the output of the SQL EXPLAIN on the given query. 
Expected GET variables: sql: urlencoded sql with positional arguments params: JSON encoded parameter values duration: time for SQL to execute passed in from toolbar just for redisplay hash: the hash of (secret + sql + params) for tamper checking """ from debug_toolbar.panels.sql import reformat_sql sql = request.GET.get('sql', '') params = request.GET.get('params', '') hash = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest() if hash != request.GET.get('hash', ''): return HttpResponseBadRequest('Tamper alert') # SQL Tampering alert if sql.lower().strip().startswith('select'): params = simplejson.loads(params) cursor = connection.cursor() if settings.DATABASE_ENGINE == "sqlite3": # SQLite's EXPLAIN dumps the low-level opcodes generated for a query; # EXPLAIN QUERY PLAN dumps a more human-readable summary # See http://www.sqlite.org/lang_explain.html for details cursor.execute("EXPLAIN QUERY PLAN %s" % (sql,), params) else: cursor.execute("EXPLAIN %s" % (sql,), params) headers = [d[0] for d in cursor.description] result = cursor.fetchall() cursor.close() context = { 'result': result, 'sql': reformat_sql(cursor.db.ops.last_executed_query(cursor, sql, params)), 'duration': request.GET.get('duration', 0.0), 'headers': headers, } return render_to_response('debug_toolbar/panels/sql_explain.html', context) raise InvalidSQLError("Only 'select' queries are allowed.") def sql_profile(request): """ Returns the output of running the SQL and getting the profiling statistics. Expected GET variables: sql: urlencoded sql with positional arguments params: JSON encoded parameter values duration: time for SQL to execute passed in from toolbar just for redisplay hash: the hash of (secret + sql + params) for tamper checking """ from debug_toolbar.panels.sql import reformat_sql sql = request.GET.get('sql', '') params = request.GET.get('params', '') hash = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest() if hash != request.GET.get('hash', ''): return HttpResponseBadRequest('Tamper alert') # SQL Tampering alert if sql.lower().strip().startswith('select'): params = simplejson.loads(params) cursor = connection.cursor() result = None headers = None result_error = None try: cursor.execute("SET PROFILING=1") # Enable profiling cursor.execute(sql, params) # Execute SELECT cursor.execute("SET PROFILING=0") # Disable profiling # The Query ID should always be 1 here but I'll subselect to get the last one just in case... cursor.execute("SELECT * FROM information_schema.profiling WHERE query_id=(SELECT query_id FROM information_schema.profiling ORDER BY query_id DESC LIMIT 1)") headers = [d[0] for d in cursor.description] result = cursor.fetchall() except: result_error = "Profiling is either not available or not supported by your database." cursor.close() context = { 'result': result, 'result_error': result_error, 'sql': reformat_sql(cursor.db.ops.last_executed_query(cursor, sql, params)), 'duration': request.GET.get('duration', 0.0), 'headers': headers, } return render_to_response('debug_toolbar/panels/sql_profile.html', context) raise InvalidSQLError("Only 'select' queries are allowed.") def template_source(request): """ Return the source of a template, syntax-highlighted by Pygments if it's available. """ from django.template import TemplateDoesNotExist from django.utils.safestring import mark_safe from django.conf import settings template_name = request.GET.get('template', None) if template_name is None: return HttpResponseBadRequest('"template" key is required') try: # Django 1.2 ... 
from django.template.loader import find_template_loader, make_origin loaders = [] for loader_name in settings.TEMPLATE_LOADERS: loader = find_template_loader(loader_name) if loader is not None: loaders.append(loader) for loader in loaders: try: source, display_name = loader.load_template_source(template_name) origin = make_origin(display_name, loader, template_name, settings.TEMPLATE_DIRS) break except TemplateDoesNotExist: source = "Template Does Not Exist: %s" % (template_name,) except (ImportError, AttributeError): # Django 1.1 ... from django.template.loader import find_template_source source, origin = find_template_source(template_name) try: from pygments import highlight from pygments.lexers import HtmlDjangoLexer from pygments.formatters import HtmlFormatter source = highlight(source, HtmlDjangoLexer(), HtmlFormatter()) source = mark_safe(source) source.pygmentized = True except ImportError: pass return render_to_response('debug_toolbar/panels/template_source.html', { 'source': source, 'template_name': template_name })
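
# Illustrative sketch (not part of the original module): the toolbar links that hit
# sql_select/sql_explain/sql_profile must carry a hash computed exactly as the views
# recompute it, or the tamper check rejects the request. Values below are toy data.
#
#   sql = 'SELECT 1'
#   params = simplejson.dumps([])
#   token = sha_constructor(settings.SECRET_KEY + sql + params).hexdigest()
#   # pass sql, params, and token as the sql/params/hash GET parameters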
tests/json_schema/test_draft07.py
StephenNneji/python-fastjsonschema
300
133638
import pytest

from .utils import template_test, resolve_param_values_and_ids


def pytest_generate_tests(metafunc):
    param_values, param_ids = resolve_param_values_and_ids(
        schema_version='http://json-schema.org/draft-07/schema',
        suite_dir='JSON-Schema-Test-Suite/tests/draft7',
        ignored_suite_files=[
            # Optional.
            'ecmascript-regex.json',
            'idn-hostname.json',
            'iri.json',
        ],
    )
    metafunc.parametrize(['schema_version', 'schema', 'data', 'is_valid'], param_values, ids=param_ids)


# Real test function to be used with parametrization by previous hook function.
test = template_test
runners/old/rltools/run_con_hostage.py
SurvivorT/SRTP
489
133658
#!/usr/bin/env python # # File: run_con_hostage.py # # Created: Sunday, August 14 2016 by rejuvyesh <<EMAIL>> # from __future__ import absolute_import, print_function import argparse import json import numpy as np import tensorflow as tf from gym import spaces import rltools.algos.policyopt import rltools.log import rltools.util from rltools.samplers.serial import SimpleSampler, ImportanceWeightedSampler, DecSampler from rltools.samplers.parallel import ThreadedSampler, ParallelSampler from madrl_environments import ObservationBuffer from madrl_environments.hostage import ContinuousHostageWorld from rltools.baselines.linear import LinearFeatureBaseline from rltools.baselines.mlp import MLPBaseline from rltools.baselines.zero import ZeroBaseline from rltools.policy.gaussian import GaussianMLPPolicy from runners.archs import * def main(): parser = argparse.ArgumentParser() parser.add_argument('--discount', type=float, default=0.95) parser.add_argument('--gae_lambda', type=float, default=0.99) parser.add_argument('--interp_alpha', type=float, default=0.5) parser.add_argument('--policy_avg_weights', type=str, default='0.3333333,0.3333333,0.3333333') parser.add_argument('--n_iter', type=int, default=250) parser.add_argument('--sampler', type=str, default='simple') parser.add_argument('--sampler_workers', type=int, default=4) parser.add_argument('--max_traj_len', type=int, default=500) parser.add_argument('--adaptive_batch', action='store_true', default=False) parser.add_argument('--n_timesteps', type=int, default=8000) parser.add_argument('--n_timesteps_min', type=int, default=1000) parser.add_argument('--n_timesteps_max', type=int, default=64000) parser.add_argument('--timestep_rate', type=int, default=20) parser.add_argument('--is_n_backtrack', type=int, default=1) parser.add_argument('--is_randomize_draw', action='store_true', default=False) parser.add_argument('--is_n_pretrain', type=int, default=0) parser.add_argument('--is_skip_is', action='store_true', default=False) parser.add_argument('--is_max_is_ratio', type=float, default=0) parser.add_argument('--buffer_size', type=int, default=1) parser.add_argument('--n_good', type=int, default=3) parser.add_argument('--n_hostage', type=int, default=5) parser.add_argument('--n_bad', type=int, default=5) parser.add_argument('--n_coop_save', type=int, default=2) parser.add_argument('--n_coop_avoid', type=int, default=2) parser.add_argument('--n_sensors', type=int, default=20) parser.add_argument('--sensor_range', type=float, default=0.2) parser.add_argument('--save_reward', type=float, default=3) parser.add_argument('--hit_reward', type=float, default=-1) parser.add_argument('--encounter_reward', type=float, default=0.01) parser.add_argument('--bomb_reward', type=float, default=-10.) 
parser.add_argument('--policy_hidden_spec', type=str, default=GAE_ARCH)
    parser.add_argument('--min_std', type=float, default=0)
    parser.add_argument('--blend_freq', type=int, default=20)
    parser.add_argument('--baseline_type', type=str, default='mlp')
    parser.add_argument('--baseline_hidden_spec', type=str, default=GAE_ARCH)
    parser.add_argument('--max_kl', type=float, default=0.01)
    parser.add_argument('--vf_max_kl', type=float, default=0.01)
    parser.add_argument('--vf_cg_damping', type=float, default=0.01)
    parser.add_argument('--save_freq', type=int, default=20)
    parser.add_argument('--log', type=str, required=False)
    parser.add_argument('--tblog', type=str, default='/tmp/madrl_tb')
    parser.add_argument('--debug', dest='debug', action='store_true')
    parser.add_argument('--no-debug', dest='debug', action='store_false')
    parser.set_defaults(debug=True)

    args = parser.parse_args()

    # list() keeps this correct on Python 3, where map() returns an iterator
    # and np.array() would otherwise produce a 0-d object array.
    policy_avg_weights = np.array(list(map(float, args.policy_avg_weights.split(','))))
    assert len(policy_avg_weights) == args.n_good

    env = ContinuousHostageWorld(args.n_good, args.n_hostage, args.n_bad,
                                 args.n_coop_save, args.n_coop_avoid,
                                 n_sensors=args.n_sensors,
                                 sensor_range=args.sensor_range,
                                 save_reward=args.save_reward,
                                 hit_reward=args.hit_reward,
                                 encounter_reward=args.encounter_reward,
                                 bomb_reward=args.bomb_reward)

    if args.buffer_size > 1:
        env = ObservationBuffer(env, args.buffer_size)

    policies = [GaussianMLPPolicy(agent.observation_space, agent.action_space,
                                  hidden_spec=args.policy_hidden_spec,
                                  enable_obsnorm=True, min_stdev=args.min_std,
                                  init_logstdev=0., tblog=args.tblog,
                                  varscope_name='gaussmlp_policy_{}'.format(agid))
                for agid, agent in enumerate(env.agents)]

    if args.blend_freq:
        assert all(
            [agent.observation_space == env.agents[0].observation_space for agent in env.agents])
        target_policy = GaussianMLPPolicy(env.agents[0].observation_space,
                                          env.agents[0].action_space,
                                          hidden_spec=args.policy_hidden_spec,
                                          enable_obsnorm=True, min_stdev=0., init_logstdev=0.,
                                          tblog=args.tblog,
                                          varscope_name='targetgaussmlp_policy')
    else:
        target_policy = None

    if args.baseline_type == 'linear':
        baselines = [LinearFeatureBaseline(agent.observation_space, enable_obsnorm=True,
                                           varscope_name='linear_baseline_{}'.format(agid))
                     for agid, agent in enumerate(env.agents)]
    elif args.baseline_type == 'mlp':
        baselines = [MLPBaseline(agent.observation_space, args.baseline_hidden_spec,
                                 enable_obsnorm=True, enable_vnorm=True,
                                 max_kl=args.vf_max_kl, damping=args.vf_cg_damping,
                                 time_scale=1.
/ args.max_traj_len, varscope_name='mlp_baseline_{}'.format(agid)) for agid, agent in enumerate(env.agents)] else: baselines = [ZeroBaseline(agent.observation_space) for agent in env.agents] if args.sampler == 'parallel': sampler_cls = ParallelSampler sampler_args = dict(max_traj_len=args.max_traj_len, n_timesteps=args.n_timesteps, n_timesteps_min=args.n_timesteps_min, n_timesteps_max=args.n_timesteps_max, timestep_rate=args.timestep_rate, adaptive=args.adaptive_batch, enable_rewnorm=True, n_workers=args.sampler_workers, mode='concurrent') else: raise NotImplementedError() step_func = rltools.algos.policyopt.TRPO(max_kl=args.max_kl) popt = rltools.algos.policyopt.ConcurrentPolicyOptimizer( env=env, policies=policies, baselines=baselines, step_func=step_func, discount=args.discount, gae_lambda=args.gae_lambda, sampler_cls=sampler_cls, sampler_args=sampler_args, n_iter=args.n_iter, target_policy=target_policy, weights=policy_avg_weights, interp_alpha=args.interp_alpha) argstr = json.dumps(vars(args), separators=(',', ':'), indent=2) rltools.util.header(argstr) log_f = rltools.log.TrainingLog(args.log, [('args', argstr)], debug=args.debug) with tf.Session() as sess: sess.run(tf.initialize_all_variables()) popt.train(sess, log_f, args.blend_freq, args.save_freq) if __name__ == '__main__': main()
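The `--policy_avg_weights`, `--blend_freq`, and `--interp_alpha` flags suggest periodic blending of per-agent parameters into the target policy. A rough numpy sketch of weighted averaging with interpolation; this is an assumption about the mechanics, not rltools' actual code:

import numpy as np

def blend_params(param_sets, weights, target, alpha):
    # Weighted average of each agent's flat parameter vector ...
    avg = sum(w * p for w, p in zip(weights, param_sets))
    # ... interpolated toward the current target parameters.
    return alpha * avg + (1.0 - alpha) * np.asarray(target)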
how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py
lobrien/MachineLearningNotebooks
3,074
133666
<gh_stars>1000+
# Modified from https://www.geeksforgeeks.org/multiclass-classification-using-scikit-learn/

import argparse
import os

# importing necessary libraries
from sklearn import datasets
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import joblib

from azureml.core.run import Run
run = Run.get_context()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--kernel', type=str, default='linear',
                        help='Kernel type to be used in the algorithm')
    parser.add_argument('--penalty', type=float, default=1.0,
                        help='Penalty parameter of the error term')
    args = parser.parse_args()
    # Plain built-ins here: np.str and np.float were deprecated aliases
    # removed in recent NumPy releases.
    run.log('Kernel type', str(args.kernel))
    run.log('Penalty', float(args.penalty))

    # loading the iris dataset
    iris = datasets.load_iris()

    # X -> features, y -> label
    X = iris.data
    y = iris.target

    # dividing X, y into train and test data
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # training a linear SVM classifier
    svm_model_linear = SVC(kernel=args.kernel, C=args.penalty).fit(X_train, y_train)
    svm_predictions = svm_model_linear.predict(X_test)

    # model accuracy for X_test
    accuracy = svm_model_linear.score(X_test, y_test)
    print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))
    run.log('Accuracy', float(accuracy))

    # creating a confusion matrix
    cm = confusion_matrix(y_test, svm_predictions)
    print(cm)

    os.makedirs('outputs', exist_ok=True)
    # files saved in the "outputs" folder are automatically uploaded into run history
    joblib.dump(svm_model_linear, 'outputs/model.joblib')


if __name__ == '__main__':
    main()
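A minimal sketch of consuming the artifact saved above, assuming the run's `outputs/` folder has been downloaded locally:

import joblib
from sklearn import datasets

# Load the persisted SVC and score a few rows of the same dataset.
model = joblib.load('outputs/model.joblib')
X = datasets.load_iris().data
print(model.predict(X[:5]))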
py/torch_tensorrt/_enums.py
svenchilton/Torch-TensorRT
430
133684
from torch_tensorrt._C import dtype, DeviceType, EngineCapability, TensorFormat
tests/test_plotting/test_matplotlib/test_plot_3d.py
RasaHQ/whatlies
325
133687
<gh_stars>100-1000
import pytest
import numpy as np

from whatlies.language import SpacyLanguage
from whatlies.transformers import Pca

words = [
    "prince",
    "princess",
    "nurse",
    "doctor",
    "banker",
    "man",
    "woman",
    "cousin",
    "niece",
    "king",
    "queen",
    "dude",
    "guy",
    "gal",
    "fire",
    "dog",
    "cat",
    "mouse",
    "red",
    "blue",
    "green",
    "yellow",
    "water",
    "person",
    "family",
    "brother",
    "sister",
]

# I'm loading in the spaCy model globally because it is much faster this way.
lang = SpacyLanguage("en_core_web_md")


@pytest.fixture
def embset():
    return lang[words]


def test_set_title_works(embset):
    ax = embset.plot_3d(annot=True, title="foobar")
    assert ax.title._text == "foobar"


def test_correct_points_plotted(embset):
    embset_plt = embset.transform(Pca(3))
    ax = embset_plt.plot_3d(annot=True)
    offset = ax.collections[0]._offsets3d
    assert np.all(np.array(offset).T == embset_plt.to_X())


def test_correct_points_plotted_mapped(embset):
    embset_plt = embset.transform(Pca(3))
    ax = embset_plt.plot_3d("king", "red", "dog", annot=True)
    offset = ax.collections[0]._offsets3d
    king, red, dog = [v for v in np.array(offset)]
    assert np.all(king == np.array([embset_plt[w] > embset_plt["king"] for w in words]))
    assert np.all(red == np.array([embset_plt[w] > embset_plt["red"] for w in words]))
    assert np.all(dog == np.array([embset_plt[w] > embset_plt["dog"] for w in words]))


def test_basic_dimensions_3d_chart(embset):
    embset_plt = embset.transform(Pca(3))
    ax = embset_plt.plot_3d(annot=True, title="foobar")
    assert ax.xaxis.get_label_text() == "Dimension 0"
    assert ax.yaxis.get_label_text() == "Dimension 1"
    assert ax.zaxis.get_label_text() == "Dimension 2"
    assert [t.get_text() for t in ax.texts] == words


def test_named_dimensions_3d_chart(embset):
    ax = embset.transform(Pca(3)).plot_3d("king", "queen", "prince", annot=True)
    assert ax.xaxis.get_label_text() == "king"
    assert ax.yaxis.get_label_text() == "queen"
    assert ax.zaxis.get_label_text() == "prince"
    assert [t.get_text() for t in ax.texts] == words


def test_named_dimensions_3d_chart_rename(embset):
    ax = embset.transform(Pca(3)).plot_3d(
        "king", "queen", "prince", annot=True, x_label="x", y_label="y"
    )
    assert ax.xaxis.get_label_text() == "x"
    assert ax.yaxis.get_label_text() == "y"
    assert ax.zaxis.get_label_text() == "prince"
    assert [t.get_text() for t in ax.texts] == words
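The `emb_a > emb_b` comparisons above use whatlies' projection operator. A plain-numpy sketch of scalar projection of `a` onto `b`, as an assumption about the formula the library applies:

import numpy as np

def project(a, b):
    # Length of the component of vector a that points along b.
    return np.dot(a, b) / np.linalg.norm(b)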
tests/unit/test_seeds.py
joye1503/cocrawler
166
133692
<reponame>joye1503/cocrawler<filename>tests/unit/test_seeds.py import cocrawler.seeds as seeds def test_special_seed_handling(): specialsh = seeds.special_seed_handling assert specialsh('foo') == 'http://foo' assert specialsh('//foo/') == 'http://foo/' assert specialsh('https://foo') == 'https://foo' #assert specialsh('mailto:foo') == 'mailto:foo'
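For reference, a minimal implementation that would satisfy the assertions above; this is a sketch, not cocrawler's actual code:

def special_seed_handling_sketch(seed):
    if seed.startswith('//'):
        return 'http:' + seed       # scheme-relative: assume http
    if '://' not in seed:
        return 'http://' + seed     # bare host: prepend a scheme
    return seed                     # already has an explicit scheme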
language/emql/util_test.py
urikz/language
1,199
133704
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for emql.util.""" from language.emql import util import numpy as np import tensorflow.compat.v1 as tf class UtilTest(tf.test.TestCase): def setUp(self): super(UtilTest, self).setUp() self.sess = tf.Session() self.logits = tf.constant([[2, 3, 1, -1], [4, 1, 9, 3]], dtype=tf.float32) self.labels = tf.constant([[0, 1, 0, 1], [1, 0, 0, 0]], dtype=tf.float32) def test_hits_at_k(self): hits_at_one = util.compute_hits_at_k(self.logits, self.labels, k=1) hits_at_two = util.compute_hits_at_k(self.logits, self.labels, k=2) self.assertAllEqual( hits_at_one.eval(session=self.sess), np.array([1, 0])) self.assertAllEqual( hits_at_two.eval(session=self.sess), np.array([1, 1])) def test_recall_at_k(self): recall_at_one = util.compute_recall_at_k(self.logits, self.labels, k=1) recall_at_two = util.compute_recall_at_k(self.logits, self.labels, k=2) self.assertAllEqual( recall_at_one.eval(session=self.sess), np.array([0.5, 0])) self.assertAllEqual( recall_at_two.eval(session=self.sess), np.array([0.5, 1])) def test_map_at_k(self): map_at_one = util.compute_average_precision_at_k( self.logits, self.labels, k=1) map_at_two = util.compute_average_precision_at_k( self.logits, self.labels, k=2) self.assertAllEqual( map_at_one.eval(session=self.sess), np.array([1.0, 0.0])) self.assertAllEqual( map_at_two.eval(session=self.sess), np.array([1.0, 0.5])) def test_get_nonzero_ids(self): nonzero_at_one = util.get_nonzero_ids(self.labels, k=1) nonzero_at_two = util.get_nonzero_ids(self.labels, k=2) self.assertAllEqual( nonzero_at_one.eval(session=self.sess), np.array([[1], [0]])) self.assertAllEqual( nonzero_at_two.eval(session=self.sess), np.array([[1, 3], [0, -1]])) def test_embedding_lookup_with_padding(self): tokens = tf.constant([0, -1], dtype=tf.int32) embeddings_mat = tf.constant([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]) embs = util.embedding_lookup_with_padding( embeddings_mat, tokens, padding=-1) embs_np = embs.eval(session=self.sess) # 2, 3 self.assertAllClose(embs_np[0, :], [0.1, 0.2, 0.3]) self.assertAllClose(embs_np[1, :], [0, 0, 0]) def test_x_in_set(self): x = tf.constant([[1, 2], [3, 4]], dtype=tf.int32) s = tf.constant([[1, 2, 5], [4, 7, 8]], dtype=tf.int32) _, x_in_s = util.compute_x_in_set(x, s) self.assertAllEqual( x_in_s.eval(session=self.sess), np.array([[1, 1], [0, 1]])) def test_bert_tokenizer(self): text = '<NAME>' bert_tokenizer = util.BertTokenizer() _, (token_ids, _, input_mask) = bert_tokenizer.tokenize(text) self.assertGreater(np.sum(input_mask), 2) self.assertAllEqual(token_ids != 0, input_mask) self.assertEqual(len(token_ids), bert_tokenizer.max_seq_length) if __name__ == '__main__': tf.test.main()
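A plain-numpy sketch of the hits@k computation exercised by `test_hits_at_k` above, assuming top-k by logit with ties broken by index (as `tf.nn.top_k` does):

import numpy as np

def hits_at_k(logits, labels, k):
    # Indices of the k largest logits per row, in descending order.
    top_k = np.argsort(-logits, kind='stable', axis=1)[:, :k]
    # 1 if any of the top-k positions carries a positive label, else 0.
    return np.array([labels[i, top_k[i]].max() for i in range(len(logits))])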
designate/tests/unit/metrics/test_metrics.py
mrlesmithjr/designate
145
133720
<filename>designate/tests/unit/metrics/test_metrics.py<gh_stars>100-1000 # # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import time from unittest import mock import monascastatsd from oslo_config import cfg from oslo_config import fixture as cfg_fixture from designate import metrics from designate.metrics_client import noop from designate.tests import fixtures from designate.tests import TestCase class TestNoopMetrics(TestCase): def setUp(self): super(TestCase, self).setUp() self.stdlog = fixtures.StandardLogging() self.useFixture(self.stdlog) self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf self.CONF.set_override('enabled', False, 'monasca:statsd') def test_monasca_metrics_disabled(self): self.metrics = metrics.Metrics() self.assertIsInstance(self.metrics.client, noop.Client) self.assertIn('Statsd disabled', self.stdlog.logger.output) def test_noop_metrics_client_getters(self): self.metrics = metrics.Metrics() self.assertIsInstance(self.metrics.counter('name'), noop.NoopCounter) self.assertIsInstance(self.metrics.gauge(), noop.NoopGauge) self.assertIsInstance(self.metrics.timer(), noop.NoopTimer) self.assertIsNotNone(self.metrics.timer.__self__) def test_noop_metrics_client_timed(self): self.metrics = metrics.Metrics() timer = self.metrics.client.get_timer() def func(a): start_time = time.time() try: return a finally: timer.timing('mdns.xfr.zone_sync', time.time() - start_time) result = func(1) self.assertEqual(result, 1) class TestMonascaMetrics(TestCase): def setUp(self): super(TestCase, self).setUp() self.stdlog = fixtures.StandardLogging() self.useFixture(self.stdlog) self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf self.CONF.set_override('enabled', True, 'monasca:statsd') @mock.patch('socket.socket.connect') def test_monasca_metrics_enabled(self, conn_mock): self.metrics = metrics.Metrics() self.assertIsInstance(self.metrics.client, monascastatsd.client.Client) self.assertIn('Statsd reports to 127.0.0.1:8125', self.stdlog.logger.output) self.assertTrue(conn_mock.called) @mock.patch('socket.socket.connect') def test_monasca_metrics_client_getters(self, conn_mock): self.metrics = metrics.Metrics() self.assertIsInstance(self.metrics.counter('name'), monascastatsd.counter.Counter) self.assertIsInstance(self.metrics.gauge(), monascastatsd.gauge.Gauge) self.assertIsInstance(self.metrics.timer(), monascastatsd.timer.Timer) self.assertIsNotNone(self.metrics.timer.__self__) self.assertTrue(conn_mock.called) @mock.patch('socket.socket.send') @mock.patch('socket.socket.connect') def test_monasca_metrics_client_timed(self, conn_mock, send_mock): self.metrics = metrics.Metrics() timer = self.metrics.client.get_timer() def func(a): start_time = time.time() try: return a finally: timer.timing('mdns.xfr.zone_sync', time.time() - start_time) result = func(1) self.assertEqual(result, 1) self.assertTrue(conn_mock.called) self.assertTrue(send_mock.called) def test_monasca_enabled_but_client_not_installed(self): restore = metrics.monascastatsd try: 
metrics.monascastatsd = None self.metrics = metrics.Metrics() self.assertIsInstance(self.metrics.client, noop.Client) self.assertIn( 'monasca-statsd client not installed. ' 'Metrics will be ignored.', self.stdlog.logger.output ) finally: metrics.monascastatsd = restore
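The manual start_time/finally pattern repeated in both `*_client_timed` tests could be factored into a decorator; a hypothetical helper, not part of designate:

import functools
import time

def timed(timer, name):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            try:
                return func(*args, **kwargs)
            finally:
                # Report elapsed seconds under the given metric name.
                timer.timing(name, time.time() - start)
        return wrapper
    return decorator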
tests/autocomplete.py
silviogutierrez/reactivated
178
133721
import pytest from reactivated import forms from sample.server.apps.samples import models @pytest.mark.django_db @pytest.mark.urls("tests.urls") def test_autocomplete(client): composer = models.Composer.objects.create(name="<NAME>") models.Composer.objects.create(name="<NAME>") assert client.get("/autocomplete-view/").status_code == 200 assert ( client.post( "/autocomplete-view/", {"name": "Zarzuela", "style": "BUFFA", "composer": composer.pk}, ).status_code == 302 ) response = client.get( "/autocomplete-view/", {"autocomplete": "name", "query": "Wagner"} ) assert "Rendered form" in str(response.content) response = client.get( "/autocomplete-view/", {"autocomplete": "composer", "query": "Wagner"} ) assert response.json()["results"][0]["label"] == "<NAME>" @pytest.mark.django_db @pytest.mark.urls("tests.urls") def test_invalid_value(client): response = client.post( "/autocomplete-view/", {"name": "Zarzuela", "composer": "21s7"} ) assert "Select a valid choice" in response.context["form"].errors["composer"][0] assert response.context["form"]["composer"].value() == "21s7" @pytest.mark.django_db @pytest.mark.urls("tests.urls") def test_typed_autocomplete(client): composer = models.Composer.objects.create(name="<NAME>") models.Composer.objects.create(name="<NAME>") assert client.get("/typed-autocomplete-view/").status_code == 200 assert ( client.post( "/typed-autocomplete-view/", {"name": "Zarzuela", "composer": composer.pk} ).status_code == 302 ) response = client.get( "/typed-autocomplete-view/", {"autocomplete": "name", "query": "Wagner"} ) assert "" in str(response.content) response = client.get( "/typed-autocomplete-view/", {"autocomplete": "composer", "query": "Wagner"} ) assert response.json()["results"][0]["label"] == "<NAME>" def test_prefix_calculation(client): assert forms.get_form_or_form_set_descriptor("opera_form_set-0-composer_field") == ( "opera_form_set", "composer_field", ) assert forms.get_form_or_form_set_descriptor("opera_form-composer_field") == ( "opera_form", "composer_field", ) assert forms.get_form_or_form_set_descriptor("composer_field") == ( None, "composer_field", )
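A sketch of prefix parsing consistent with `test_prefix_calculation` above, offered as an assumption rather than reactivated's actual implementation: the first dash-separated token is the form/formset prefix (if any) and the last is the field name.

def get_form_or_form_set_descriptor_sketch(html_name):
    parts = html_name.split('-')
    if len(parts) == 1:
        return None, parts[0]       # bare field, no form prefix
    return parts[0], parts[-1]      # prefix, field (formset index dropped)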
src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/models/_storage_management_client_enums.py
Mannan2812/azure-cli-extensions
2,728
133743
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum, EnumMeta from six import with_metaclass class _CaseInsensitiveEnumMeta(EnumMeta): def __getitem__(self, name): return super().__getitem__(name.upper()) def __getattr__(cls, name): """Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """ try: return cls._member_map_[name.upper()] except KeyError: raise AttributeError(name) class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Required for storage accounts where kind = BlobStorage. The access tier used for billing. """ HOT = "Hot" COOL = "Cool" class AccountStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the status indicating whether the primary location of the storage account is available or unavailable. """ AVAILABLE = "available" UNAVAILABLE = "unavailable" class BlobInventoryPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DEFAULT = "default" class BlobRestoreProgressStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The status of blob restore progress. Possible values are: - InProgress: Indicates that blob restore is ongoing. - Complete: Indicates that blob restore has been completed successfully. - Failed: Indicates that blob restore is failed. """ IN_PROGRESS = "InProgress" COMPLETE = "Complete" FAILED = "Failed" class Bypass(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies whether traffic is bypassed for Logging/Metrics/AzureServices. Possible values are any combination of Logging|Metrics|AzureServices (For example, "Logging, Metrics"), or None to bypass none of those traffics. """ NONE = "None" LOGGING = "Logging" METRICS = "Metrics" AZURE_SERVICES = "AzureServices" class CorsRuleAllowedMethodsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DELETE = "DELETE" GET = "GET" HEAD = "HEAD" MERGE = "MERGE" POST = "POST" OPTIONS = "OPTIONS" PUT = "PUT" class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of identity that created the resource. """ USER = "User" APPLICATION = "Application" MANAGED_IDENTITY = "ManagedIdentity" KEY = "Key" class DefaultAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies the default action of allow or deny when no other rules match. """ ALLOW = "Allow" DENY = "Deny" class DirectoryServiceOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Indicates the directory service used. """ NONE = "None" AADDS = "AADDS" AD = "AD" class EnabledProtocols(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The authentication protocol that is used for the file share. Can only be specified when creating a share. """ SMB = "SMB" NFS = "NFS" class EncryptionScopeSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The provider for the encryption scope. Possible values (case-insensitive): Microsoft.Storage, Microsoft.KeyVault. 
""" MICROSOFT_STORAGE = "Microsoft.Storage" MICROSOFT_KEY_VAULT = "Microsoft.KeyVault" class EncryptionScopeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The state of the encryption scope. Possible values (case-insensitive): Enabled, Disabled. """ ENABLED = "Enabled" DISABLED = "Disabled" class ExtendedLocationTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of extendedLocation. """ EDGE_ZONE = "EdgeZone" class GeoReplicationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The status of the secondary location. Possible values are: - Live: Indicates that the secondary location is active and operational. - Bootstrap: Indicates initial synchronization from the primary location to the secondary location is in progress.This typically occurs when replication is first enabled. - Unavailable: Indicates that the secondary location is temporarily unavailable. """ LIVE = "Live" BOOTSTRAP = "Bootstrap" UNAVAILABLE = "Unavailable" class HttpProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The protocol permitted for a request made with the account SAS. """ HTTPS_HTTP = "https,http" HTTPS = "https" class IdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The identity type. """ NONE = "None" SYSTEM_ASSIGNED = "SystemAssigned" USER_ASSIGNED = "UserAssigned" SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned" class ImmutabilityPolicyState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked. """ LOCKED = "Locked" UNLOCKED = "Unlocked" class ImmutabilityPolicyUpdateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The ImmutabilityPolicy update type of a blob container, possible values include: put, lock and extend. """ PUT = "put" LOCK = "lock" EXTEND = "extend" class InventoryRuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The valid value is Inventory """ INVENTORY = "Inventory" class KeyPermission(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Permissions for the key -- read-only or full permissions. """ READ = "Read" FULL = "Full" class KeySource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage, Microsoft.Keyvault """ MICROSOFT_STORAGE = "Microsoft.Storage" MICROSOFT_KEYVAULT = "Microsoft.Keyvault" class KeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Encryption key type to be used for the encryption service. 'Account' key type implies that an account-scoped encryption key will be used. 'Service' key type implies that a default service key is used. """ SERVICE = "Service" ACCOUNT = "Account" class Kind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Indicates the type of storage account. """ STORAGE = "Storage" STORAGE_V2 = "StorageV2" BLOB_STORAGE = "BlobStorage" FILE_STORAGE = "FileStorage" BLOCK_BLOB_STORAGE = "BlockBlobStorage" class LargeFileSharesState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Allow large file shares if sets to Enabled. It cannot be disabled once it is enabled. """ DISABLED = "Disabled" ENABLED = "Enabled" class LeaseContainerRequestAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies the lease action. Can be one of the available actions. 
""" ACQUIRE = "Acquire" RENEW = "Renew" CHANGE = "Change" RELEASE = "Release" BREAK_ENUM = "Break" class LeaseDuration(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies whether the lease on a container is of infinite or fixed duration, only when the container is leased. """ INFINITE = "Infinite" FIXED = "Fixed" class LeaseState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Lease state of the container. """ AVAILABLE = "Available" LEASED = "Leased" EXPIRED = "Expired" BREAKING = "Breaking" BROKEN = "Broken" class LeaseStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The lease status of the container. """ LOCKED = "Locked" UNLOCKED = "Unlocked" class ListContainersInclude(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DELETED = "deleted" class ListSharesExpand(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DELETED = "deleted" SNAPSHOTS = "snapshots" class ManagementPolicyName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): DEFAULT = "default" class MinimumTlsVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Set the minimum TLS version to be permitted on requests to storage. The default interpretation is TLS 1.0 for this property. """ TLS1_0 = "TLS1_0" TLS1_1 = "TLS1_1" TLS1_2 = "TLS1_2" class Name(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Name of the policy. The valid value is AccessTimeTracking. This field is currently read only """ ACCESS_TIME_TRACKING = "AccessTimeTracking" class Permissions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed permissions for the account SAS. Possible values include: Read (r), Write (w), Delete (d), List (l), Add (a), Create (c), Update (u) and Process (p). """ R = "r" D = "d" W = "w" L = "l" A = "a" C = "c" U = "u" P = "p" class PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The current provisioning state. """ SUCCEEDED = "Succeeded" CREATING = "Creating" DELETING = "Deleting" FAILED = "Failed" class PrivateEndpointServiceConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The private endpoint connection status. """ PENDING = "Pending" APPROVED = "Approved" REJECTED = "Rejected" class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the status of the storage account at the time the operation was called. """ CREATING = "Creating" RESOLVING_DNS = "ResolvingDNS" SUCCEEDED = "Succeeded" class PublicAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies whether data in the container may be accessed publicly and the level of access. """ CONTAINER = "Container" BLOB = "Blob" NONE = "None" class PutSharesExpand(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): SNAPSHOTS = "snapshots" class Reason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. """ ACCOUNT_NAME_INVALID = "AccountNameInvalid" ALREADY_EXISTS = "AlreadyExists" class ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The "NotAvailableForSubscription" is related to capacity at DC. 
""" QUOTA_ID = "QuotaId" NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription" class RootSquashType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The property is for NFS share only. The default is NoRootSquash. """ NO_ROOT_SQUASH = "NoRootSquash" ROOT_SQUASH = "RootSquash" ALL_SQUASH = "AllSquash" class RoutingChoice(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Routing Choice defines the kind of network routing opted by the user. """ MICROSOFT_ROUTING = "MicrosoftRouting" INTERNET_ROUTING = "InternetRouting" class RuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The valid value is Lifecycle """ LIFECYCLE = "Lifecycle" class Services(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed services accessible with the account SAS. Possible values include: Blob (b), Queue (q), Table (t), File (f). """ B = "b" Q = "q" T = "t" F = "f" class ShareAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Access tier for specific share. GpV2 account can choose between TransactionOptimized (default), Hot, and Cool. FileStorage account can choose Premium. """ TRANSACTION_OPTIMIZED = "TransactionOptimized" HOT = "Hot" COOL = "Cool" PREMIUM = "Premium" class SignedResource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed services accessible with the service SAS. Possible values include: Blob (b), Container (c), File (f), Share (s). """ B = "b" C = "c" F = "f" S = "s" class SignedResourceTypes(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The signed resource types that are accessible with the account SAS. Service (s): Access to service-level APIs; Container (c): Access to container-level APIs; Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files. """ S = "s" C = "c" O = "o" class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The SKU name. Required for account creation; optional for update. Note that in older versions, SKU name was called accountType. """ STANDARD_LRS = "Standard_LRS" STANDARD_GRS = "Standard_GRS" STANDARD_RAGRS = "Standard_RAGRS" STANDARD_ZRS = "Standard_ZRS" PREMIUM_LRS = "Premium_LRS" PREMIUM_ZRS = "Premium_ZRS" STANDARD_GZRS = "Standard_GZRS" STANDARD_RAGZRS = "Standard_RAGZRS" class SkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The SKU tier. This is based on the SKU name. """ STANDARD = "Standard" PREMIUM = "Premium" class State(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the state of virtual network rule. """ PROVISIONING = "provisioning" DEPROVISIONING = "deprovisioning" SUCCEEDED = "succeeded" FAILED = "failed" NETWORK_SOURCE_DELETED = "networkSourceDeleted" class StorageAccountExpand(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): GEO_REPLICATION_STATS = "geoReplicationStats" BLOB_RESTORE_STATUS = "blobRestoreStatus" class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Gets the unit of measurement. """ COUNT = "Count" BYTES = "Bytes" SECONDS = "Seconds" PERCENT = "Percent" COUNTS_PER_SECOND = "CountsPerSecond" BYTES_PER_SECOND = "BytesPerSecond"
java_predict_client/src/main/proto/tensorflow/contrib/learn/python/learn/metric_spec.py
tobegit3hub/deep_cnn
101
133767
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The metric spec class to flexibly connect models and metrics.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.platform import tf_logging as logging class MetricSpec(object): """MetricSpec connects a model to metric functions. The MetricSpec class contains all information necessary to connect the output of a `model_fn` to the metrics (usually, streaming metrics) that are used in evaluation. It is passed in the `metrics` argument of `Estimator.evaluate`. The `Estimator` then knows which predictions, labels, and weight to use to call a given metric function. When building the ops to run in evaluation, `Estimator` will call `create_metric_ops`, which will connect the given `metric_fn` to the model as detailed in the docstring for `create_metric_ops`, and return the metric. Example: Assuming an model has an input function which returns inputs containing (among other things) a tensor with key "income", and a labels dictionary containing "has_clicked". Let's assume that the `model_fn` for this model returns a prediction with key "clicked". In order to compute the accuracy of the "clicked" prediction, we would add ``` "click accuracy": MetricSpec(metric_fn=streaming_accuracy, prediction_key="clicked", label_key="has_clicked") ``` to the metrics argument to `evaluate`. If we would like the accuracy to be weighted by "income", we can add that as the `weight_key` argument. ``` "click accuracy": MetricSpec(metric_fn=streaming_accuracy, prediction_key="clicked", label_key="has_clicked", weight_key="income") ``` """ def __init__(self, metric_fn, prediction_key=None, label_key=None, weight_key=None): """Constructor. Creates a MetricSpec. Args: metric_fn: A function to use as a metric. Must accept `predictions`, `labels` and optionally, `weights` tensors as inputs, and must return either a single tensor which is interpreted as a value of this metric, or a pair `(value_op, update_op)`, where value_op is the op to call to obtain the value of the metric, and update_op should be evaluated for each batch in order to update internal state. prediction_key: The key for a tensor in the `predictions` dict (output from the `model_fn`) to use as the `predictions` input to the `metric_fn`. Optional. If `None`, the `model_fn` must return a single tensor or a dict with only a single entry as `predictions`. label_key: The key for a tensor in the `labels` dict (output from the `input_fn`) to use as the `labels` input to the `metric_fn`. Optional. If `None`, the `input_fn` must return a single tensor or a dict with only a single entry as `labels`. weight_key: The key for a tensor in the `inputs` dict (output from the `input_fn`) to use as the `weights` input to the `metric_fn`. Optional. If `None`, no weights will be passed to the `metric_fn`. 
""" self._metric_fn = metric_fn self._prediction_key = prediction_key self._label_key = label_key self._weight_key = weight_key @property def prediction_key(self): return self._prediction_key @property def label_key(self): return self._label_key @property def weight_key(self): return self._weight_key @property def metric_fn(self): return self._metric_fn def __str__(self): if hasattr(self.metric_fn, '__name__'): fn_name = self.metric_fn.__name__ elif (hasattr(self.metric_fn, 'func') and hasattr(self.metric_fn.func, '__name__')): fn_name = self.metric_fn.func.__name__ # If it's a functools.partial. else: fn_name = '%s' % self.metric_fn return ('MetricSpec(metric_fn=%s, ' % fn_name + 'prediction_key=%s, ' % self.prediction_key + 'label_key=%s, ' % self.label_key + 'weight_key=%s)' % self.weight_key ) def create_metric_ops(self, inputs, labels, predictions): """Connect our `metric_fn` to the specified members of the given dicts. This function will call the `metric_fn` given in our constructor as follows: ``` metric_fn(predictions[self.prediction_key], labels[self.label_key], weights=weights[self.weight_key]) ``` And returns the result. The `weights` argument is only passed if `self.weight_key` is not `None`. `predictions` and `labels` may be single tensors as well as dicts. If `predictions` is a single tensor, `self.prediction_key` must be `None`. If `predictions` is a single element dict, `self.prediction_key` is allowed to be `None`. Conversely, if `labels` is a single tensor, `self.label_key` must be `None`. If `labels` is a single element dict, `self.label_key` is allowed to be `None`. Args: inputs: A dict of inputs produced by the `input_fn` labels: A dict of labels or a single label tensor produced by the `input_fn`. predictions: A dict of predictions or a single tensor produced by the `model_fn`. Returns: The result of calling `metric_fn`. Raises: ValueError: If `predictions` or `labels` is a single `Tensor` and `self.prediction_key` or `self.label_key` is not `None`; or if `self.label_key` is `None` but `labels` is a dict with more than one element, or if `self.prediction_key` is `None but `predictions` is a dict with more than one element. """ def _get_dict(name, dict_or_tensor, key): """Get a single tensor or an element of a dict or raise ValueError.""" if key: if not isinstance(dict_or_tensor, dict): raise ValueError('MetricSpec with ' + name + '_key specified' ' requires ' + name + 's dict, got %s' % dict_or_tensor) return dict_or_tensor[key] else: if isinstance(dict_or_tensor, dict): if len(dict_or_tensor) != 1: raise ValueError('MetricSpec without specified ' + name + '_key' ' requires ' + name + 's tensor or single element' ' dict, got %s' % dict_or_tensor) return dict_or_tensor.values()[0] else: return dict_or_tensor # Get the predictions prediction = _get_dict('prediction', predictions, self.prediction_key) # Get the labels label = _get_dict('label', labels, self.label_key) try: if self.weight_key: return self.metric_fn(prediction, label, weights=inputs[self.weight_key]) else: return self.metric_fn(prediction, label) except: # pylint: disable=bare-except logging.error('Could not create metric ops for %s.' % self) raise
tensorflow/contrib/framework/python/ops/prettyprint_ops_test.py
atfkaka/tensorflow
101
133772
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf class PrettyPrintOpsTest(tf.test.TestCase): def testPrintTensorPassthrough(self): a = tf.constant([1]) a = tf.contrib.framework.print_op(a) with self.test_session(): self.assertEqual(a.eval(), tf.constant([1]).eval()) def testPrintSparseTensorPassthrough(self): a = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4]) b = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4]) a = tf.contrib.framework.print_op(a) with self.test_session(): self.assertAllEqual(tf.sparse_tensor_to_dense(a).eval(), tf.sparse_tensor_to_dense(b).eval()) def testPrintTensorArrayPassthrough(self): a = tf.TensorArray(size=2, dtype=tf.int32, clear_after_read=False) a = a.write(1, 1) a = a.write(0, 0) a = tf.contrib.framework.print_op(a) with self.test_session(): self.assertAllEqual(a.pack().eval(), tf.constant([0, 1]).eval()) def testPrintVariable(self): a = tf.Variable(1.0) a = tf.contrib.framework.print_op(a) with self.test_session(): tf.initialize_all_variables().run() a.eval() if __name__ == "__main__": tf.test.main()
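For context, `tf.contrib` no longer exists; in TF 2.x the passthrough-print pattern is typically expressed with `tf.print` plus a control dependency. A sketch, not a drop-in replacement for the API tested above:

import tensorflow as tf

@tf.function
def print_and_pass_through(x):
    # Ensure the print op runs before the value is returned unchanged.
    with tf.control_dependencies([tf.print(x)]):
        return tf.identity(x)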
exercises/ja/exc_01_07.py
Jette16/spacy-course
2,085
133813
<gh_stars>1000+
import spacy

# Load the "ja_core_news_sm" model
nlp = ____

text = "公式発表:Appleが米国の上場企業として初めて時価評価額1兆ドルに到達しました。"

# Process the text
doc = ____

# Print the doc's text
print(____.____)
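A completed version of the exercise, for reference; the blanks above are the course's intended fill-ins, and `spacy.load` plus calling `nlp` on text is standard spaCy usage:

import spacy

nlp = spacy.load("ja_core_news_sm")
doc = nlp("公式発表:Appleが米国の上場企業として初めて時価評価額1兆ドルに到達しました。")
print(doc.text)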
firefly/distributed/reference.py
genghaolove/firefly
675
133815
<filename>firefly/distributed/reference.py<gh_stars>100-1000
#coding:utf8
'''
Created on 2013-8-14

@author: lan (www.9miao.com)
'''
from twisted.spread import pb
from firefly.utils.services import Service


class ProxyReference(pb.Referenceable):
    '''Proxy channel.'''

    def __init__(self):
        '''Initialize with a default proxy service.'''
        self._service = Service('proxy')

    def addService(self, service):
        '''Attach a service channel.'''
        self._service = service

    def remote_callChild(self, command, *arg, **kw):
        '''Forward data sent by the remote peer to the local service.'''
        return self._service.callTarget(command, *arg, **kw)
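For context on how a `pb.Referenceable` like this is driven: Perspective Broker exposes `remote_callChild` on the server object as `callRemote('callChild', ...)` on the client's reference. A hypothetical wiring sketch (the service name and remote reference are assumptions):

from firefly.utils.services import Service

proxy = ProxyReference()
proxy.addService(Service('child'))  # hypothetical service name

# On the remote peer, given a pb.RemoteReference to `proxy`:
#     d = remote_proxy.callRemote('callChild', 1001, payload)
# which invokes proxy.remote_callChild(1001, payload) locally and returns
# its result as a Deferred.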