[dataset columns: code | package | path | filename]
from verizon5gmecvnspapi.api_helper import APIHelper
class ServiceOnboardingHelmGitBranch(object):
"""Implementation of the 'ServiceOnboardingHelmGitBranch' model.
TODO: type model description here.
Attributes:
branch_name (string): The user can provide branchName for the Helm
chart.
helm_chart_path (string): The user can provide the path to the Helm
chart.
values_yaml_paths (list of string): The user can provide an array of
values.yaml file paths.
"""
# Create a mapping from Model property names to API property names
_names = {
"branch_name": 'branchName',
"helm_chart_path": 'helmChartPath',
"values_yaml_paths": 'valuesYamlPaths'
}
_optionals = [
'values_yaml_paths',
]
def __init__(self,
branch_name=None,
helm_chart_path=None,
values_yaml_paths=APIHelper.SKIP):
"""Constructor for the ServiceOnboardingHelmGitBranch class"""
# Initialize members of the class
self.branch_name = branch_name
self.helm_chart_path = helm_chart_path
if values_yaml_paths is not APIHelper.SKIP:
self.values_yaml_paths = values_yaml_paths
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
branch_name = dictionary.get("branchName") if dictionary.get("branchName") else None
helm_chart_path = dictionary.get("helmChartPath") if dictionary.get("helmChartPath") else None
values_yaml_paths = dictionary.get("valuesYamlPaths") if dictionary.get("valuesYamlPaths") else APIHelper.SKIP
# Return an object of this model
return cls(branch_name,
helm_chart_path,
values_yaml_paths)
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/models/service_onboarding_helm_git_branch.py | filename: service_onboarding_helm_git_branch.py]
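# Usage sketch for ServiceOnboardingHelmGitBranch (illustrative; not part of
# the generated package). All values are placeholders. The constructor takes
# snake_case names, while from_dictionary expects the API's camelCase keys
# declared in `_names`; the optional values_yaml_paths defaults to
# APIHelper.SKIP and is only set when provided.
branch = ServiceOnboardingHelmGitBranch(
    branch_name='main',
    helm_chart_path='charts/my-service'
)
parsed = ServiceOnboardingHelmGitBranch.from_dictionary({
    'branchName': 'main',
    'helmChartPath': 'charts/my-service',
    'valuesYamlPaths': ['values/dev.yaml', 'values/prod.yaml']
})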
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.models.claim import Claim
class ServiceClaims(object):
"""Implementation of the 'ServiceClaims' model.
Response to get all claims.
Attributes:
count (int): Count of all the claims returned by the API.
claims_res_list (list of Claim): List of all claims.
"""
# Create a mapping from Model property names to API property names
_names = {
"count": 'count',
"claims_res_list": 'claimsResList'
}
_optionals = [
'count',
'claims_res_list',
]
def __init__(self,
count=APIHelper.SKIP,
claims_res_list=APIHelper.SKIP):
"""Constructor for the ServiceClaims class"""
# Initialize members of the class
if count is not APIHelper.SKIP:
self.count = count
if claims_res_list is not APIHelper.SKIP:
self.claims_res_list = claims_res_list
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
count = dictionary.get("count") if dictionary.get("count") else APIHelper.SKIP
claims_res_list = None
if dictionary.get('claimsResList') is not None:
claims_res_list = [Claim.from_dictionary(x) for x in dictionary.get('claimsResList')]
else:
claims_res_list = APIHelper.SKIP
# Return an object of this model
return cls(count,
claims_res_list)
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/models/service_claims.py | filename: service_claims.py]
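# Usage sketch for ServiceClaims (illustrative; not part of the generated
# package). Each entry of 'claimsResList' is deserialized through
# Claim.from_dictionary; the claim payload keys below are hypothetical.
# Optionals absent from the response are skipped entirely (no attribute is
# set), so guard access with hasattr/getattr.
claims = ServiceClaims.from_dictionary({
    'count': 1,
    'claimsResList': [{'id': 'claim-uuid'}]  # hypothetical claim fields
})
if hasattr(claims, 'count'):
    print(claims.count)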
__all__ = [
'tag',
'service_tag',
'service_onboarding_additional_params',
'service_onboarding_helm_yaml_git_tag',
'workflow',
'service_swagger_spec_id',
'services',
'compatibility',
'resource_base',
'service_error',
'category',
'repository_credential',
'azure_csp_profile',
'csp_profile_data',
'service_onboarding_helm_git_branch',
'service_onboarding_helm_helmrepo',
'service_onboarding_yaml_git_branch',
'service_onboarding_terraform_git_tag',
'operations_wf',
'installation_wf',
'workload',
'service_delete_result',
'running_instance',
'edge_service_onboarding_delete_result',
'claim_status_request',
'aws_csp_profile',
'service_management_result',
'observability_template',
'service_resource',
'category_list',
'repository',
'service_onboarding_helm_git_tag',
'selected_service',
'claim',
'edge_service_onboarding_result',
'csp_profile',
'csp_profile_id_request',
'current_status',
'boundary',
'service_file',
'service',
'associate_cloud_credential_result',
'service_handler_id',
'service_dependency',
'service_onboarding_terraform_git_branch',
'custom_wf',
'dependent_service',
'service_claims',
'default_location',
'cluster_info_details',
'o_auth_token',
'service_state_enum',
'workflow_type_enum',
'workload_revision_type_enum',
'service_status_enum',
'service_type_enum',
'hook_type_enum',
'claim_type_enum',
'repository_reacheability_enum',
'csp_profile_type_enum',
'aws_csp_profile_cred_type_enum',
'sand_box_state_enum',
'sort_direction_enum',
'service_dependency_package_type_enum',
'csp_compatibility_enum',
'source_code_type_enum',
'event_type_enum',
'sand_box_status_enum',
'category_type_enum',
'repository_credential_type_enum',
'edge_service_repository_type_enum',
'upload_type_enum',
'workload_repository_type_enum',
'dependent_services_type_enum',
'claim_status_enum',
'o_auth_scope_enum',
'o_auth_provider_error_enum',
]
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/models/__init__.py | filename: __init__.py]
from verizon5gmecvnspapi.api_helper import APIHelper
class ServiceDependency(object):
"""Implementation of the 'ServiceDependency' model.
Dependency of the service.
Attributes:
rank (int): The dependency rank.
mtype (ServiceTypeEnum): Service Type e.g. Installation, Operations,
Custom.
service_name (string): Name of the dependent service.
version (string): Version of the service being used.
package_type (ServiceDependencyPackageTypeEnum): Deployment package
type.
"""
# Create a mapping from Model property names to API property names
_names = {
"rank": 'rank',
"mtype": 'type',
"service_name": 'serviceName',
"version": 'version',
"package_type": 'packageType'
}
_optionals = [
'rank',
'mtype',
'service_name',
'version',
'package_type',
]
_nullables = [
'package_type',
]
def __init__(self,
rank=APIHelper.SKIP,
mtype=APIHelper.SKIP,
service_name=APIHelper.SKIP,
version=APIHelper.SKIP,
package_type=APIHelper.SKIP):
"""Constructor for the ServiceDependency class"""
# Initialize members of the class
if rank is not APIHelper.SKIP:
self.rank = rank
if mtype is not APIHelper.SKIP:
self.mtype = mtype
if service_name is not APIHelper.SKIP:
self.service_name = service_name
if version is not APIHelper.SKIP:
self.version = version
if package_type is not APIHelper.SKIP:
self.package_type = package_type
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
rank = dictionary.get("rank") if dictionary.get("rank") else APIHelper.SKIP
mtype = dictionary.get("type") if dictionary.get("type") else APIHelper.SKIP
service_name = dictionary.get("serviceName") if dictionary.get("serviceName") else APIHelper.SKIP
version = dictionary.get("version") if dictionary.get("version") else APIHelper.SKIP
package_type = dictionary.get("packageType") if "packageType" in dictionary.keys() else APIHelper.SKIP
# Return an object of this model
return cls(rank,
mtype,
service_name,
version,
package_type)
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/models/service_dependency.py | filename: service_dependency.py]
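# Behavior sketch for ServiceDependency (illustrative; not part of the
# generated package). 'packageType' is listed in _nullables, so
# from_dictionary distinguishes an explicit null (key present, attribute set
# to None) from a missing key (attribute skipped altogether).
dep = ServiceDependency.from_dictionary({
    'serviceName': 'my-dependency',
    'version': '1.0.0',
    'packageType': None  # explicit null survives as None
})
missing = ServiceDependency.from_dictionary({'serviceName': 'my-dependency'})
assert dep.package_type is None
assert not hasattr(missing, 'package_type')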
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.configuration import Server
from verizon5gmecvnspapi.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon5gmecvnspapi.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from verizon5gmecvnspapi.models.service_claims import ServiceClaims
from verizon5gmecvnspapi.models.associate_cloud_credential_result import AssociateCloudCredentialResult
from verizon5gmecvnspapi.exceptions.edge_service_onboarding_result_error_exception import EdgeServiceOnboardingResultErrorException
class ServiceClaimsController(BaseController):
"""A Controller to access Endpoints in the verizon5gmecvnspapi API."""
def __init__(self, config):
super(ServiceClaimsController, self).__init__(config)
def list_service_claims(self,
account_name,
service_id,
correlation_id=None,
claim_status=None,
q=None,
limit=None,
off_set=None,
sort_key='createdDate',
sort_dir=None,
details_flag=True):
"""Does a GET request to /v1/service/{serviceId}/claims.
Fetch all of a service's claims. Service claims are generated based on
the service's compatibility with different cloud service providers.
Args:
account_name (string): User account name.
service_id (string): Auto-generated Id of the service whose claim
information needs to be fetched.
correlation_id (string, optional): TODO: type description here.
claim_status (string, optional): Queries with claim status on the
claims.
q (string, optional): Use the comma (,) character to separate
multiple values, e.g.
claimType=Public MEC:claims.sandBoxState=NOT_STARTED,STARTED.
limit (long|int, optional): Number of items to return.
off_set (long|int, optional): Id of the last response value in the
previous list.
sort_key (string, optional): Sorts the response by an attribute.
Default is createdDate.
sort_dir (SortDirectionEnum, optional): Sorts the response. Use
asc for ascending or desc for descending order. The default is
desc.
details_flag (bool, optional): Defaults to true. If true, all
details are returned.
Returns:
ServiceClaims: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/service/{serviceId}/claims')
.http_method(HttpMethodEnum.GET)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceId')
.value(service_id)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.query_param(Parameter()
.key('claimStatus')
.value(claim_status))
.query_param(Parameter()
.key('q')
.value(q))
.query_param(Parameter()
.key('limit')
.value(limit))
.query_param(Parameter()
.key('offSet')
.value(off_set))
.query_param(Parameter()
.key('sortKey')
.value(sort_key))
.query_param(Parameter()
.key('sortDir')
.value(sort_dir))
.query_param(Parameter()
.key('detailsFlag')
.value(details_flag))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceClaims.from_dictionary)
.local_error('400', 'Bad request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def associate_cloud_credential_with_service_claim(self,
account_name,
service_id,
claim_id,
body,
correlation_id=None):
"""Does a POST request to /v1/services/{serviceId}/claims/{claimId}/associateCspProfile.
Associate an existing cloud credential with a service's claim; the
credential will be used to connect to the user's cloud provider.
Args:
account_name (string): User account name.
service_id (string): System generated unique identifier of the
service which user is using.
claim_id (string): System generated unique identifier for the
claim which user is using.
body (CSPProfileIdRequest): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
AssociateCloudCredentialResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceId}/claims/{claimId}/associateCspProfile')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceId')
.value(service_id)
.should_encode(True))
.template_param(Parameter()
.key('claimId')
.value(claim_id)
.should_encode(True))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(AssociateCloudCredentialResult.from_dictionary)
.local_error('400', 'Bad request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not Found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def update_service_claim_status(self,
account_name,
service_id,
claim_id,
body,
correlation_id=None):
"""Does a POST request to /v1/services/{serviceId}/claims/{claimId}/claimStatus.
Using this API, a user can update a service claim's status, e.g. to
complete or verified.
Args:
account_name (string): User account name.
service_id (string): System generated unique identifier of the
service which user is using.
claim_id (string): System generated unique identifier of the claim
which user is using.
body (ClaimStatusRequest): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
void: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceId}/claims/{claimId}/claimStatus')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceId')
.value(service_id)
.should_encode(True))
.template_param(Parameter()
.key('claimId')
.value(claim_id)
.should_encode(True))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
def mark_service_claim_status_as_completed(self,
account_name,
service_id,
claim_id,
correlation_id=None):
"""Does a POST request to /v1/services/{serviceId}/claims/{claimId}/claimStatusCompleted.
Mark a service's claim status as complete after successful
verification of sandbox testing in the respective sandbox environment.
Args:
account_name (string): User account name.
service_id (string): System generated unique identifier of the
service which user is using.
claim_id (string): System generated unique identifier of the claim
which user is using.
correlation_id (string, optional): TODO: type description here.
Returns:
void: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceId}/claims/{claimId}/claimStatusCompleted')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceId')
.value(service_id)
.should_encode(True))
.template_param(Parameter()
.key('claimId')
.value(claim_id)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.auth(Single('global'))
).execute()
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/controllers/service_claims_controller.py | filename: service_claims_controller.py]
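# Usage sketch for ServiceClaimsController (illustrative; not part of the
# generated package). `config` stands for an already-built SDK configuration
# carrying the OAuth 2 credentials; in an APIMatic-generated SDK the
# controller is normally reached through the top-level client rather than
# constructed directly. All identifier values are placeholders.
claims_controller = ServiceClaimsController(config)
result = claims_controller.list_service_claims(
    account_name='0000123456-00001',
    service_id='service-uuid',
    claim_status='CLAIMED',  # hypothetical status filter
    limit=10
)
for claim in getattr(result, 'claims_res_list', []):
    print(claim)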
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.configuration import Server
from verizon5gmecvnspapi.utilities.file_wrapper import FileWrapper
from verizon5gmecvnspapi.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon5gmecvnspapi.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from verizon5gmecvnspapi.models.edge_service_onboarding_delete_result import EdgeServiceOnboardingDeleteResult
from verizon5gmecvnspapi.models.current_status import CurrentStatus
from verizon5gmecvnspapi.models.service_management_result import ServiceManagementResult
from verizon5gmecvnspapi.models.service import Service
from verizon5gmecvnspapi.models.service_file import ServiceFile
from verizon5gmecvnspapi.models.services import Services
from verizon5gmecvnspapi.exceptions.edge_service_onboarding_result_error_exception import EdgeServiceOnboardingResultErrorException
class ServiceOnboardingController(BaseController):
"""A Controller to access Endpoints in the verizon5gmecvnspapi API."""
def __init__(self, config):
super(ServiceOnboardingController, self).__init__(config)
def remove_service(self,
account_name,
service_name,
version,
correlation_id=None):
"""Does a DELETE request to /v1/services/{serviceName}/{version}.
Remove a service from user's organization.
Args:
account_name (string): User account name.
service_name (string): Name of the service which is about to be
deleted.
version (string): Version of the service which is about to be
deleted.
correlation_id (string, optional): TODO: type description here.
Returns:
EdgeServiceOnboardingDeleteResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceName}/{version}')
.http_method(HttpMethodEnum.DELETE)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(EdgeServiceOnboardingDeleteResult.from_dictionary)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def get_service_job_status(self,
account_name,
job_id,
correlation_id=None):
"""Does a GET request to /v1/services/{jobId}/status.
Check the current status of a job for a service using the job ID.
Args:
account_name (string): User account name.
job_id (string): Auto-generated Id of the job.
correlation_id (string, optional): TODO: type description here.
Returns:
CurrentStatus: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{jobId}/status')
.http_method(HttpMethodEnum.GET)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('jobId')
.value(job_id)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(CurrentStatus.from_dictionary)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def stop_service_testing(self,
account_name,
service_name,
version,
correlation_id=None):
"""Does a PUT request to /v1/services/{serviceName}/{version}/certify.
Start the service certification process. On successful completion of
this process, the service's status will change to certified.
Args:
account_name (string): User account name.
service_name (string): Name of the service e.g. any sub string of
serviceName.
version (string): Version of service which is to be certified.
correlation_id (string, optional): TODO: type description here.
Returns:
ServiceManagementResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceName}/{version}/certify')
.http_method(HttpMethodEnum.PUT)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceManagementResult.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Unexpected error.', EdgeServiceOnboardingResultErrorException)
).execute()
def register_service(self,
account_name,
body,
correlation_id=None):
"""Does a POST request to /v1/services.
Create a new service within user's organization.
Args:
account_name (string): User account name.
body (Service): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
Service: Response from the API. Created.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Service.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('403', 'Forbidden.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('415', 'Unsupported media type.', EdgeServiceOnboardingResultErrorException)
.local_error('429', 'Too many requests.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def start_service_claim_sand_box_testing(self,
account_name,
service_id,
claim_id,
body,
correlation_id=None):
"""Does a PUT request to /v1/services/{serviceId}/claims/{claimId}/sandBoxStart.
Initiate testing of a service in a sandbox environment per claim,
based on the service's compatibility(s).
Args:
account_name (string): User account name.
service_id (string): An id of the service created e.g. UUID.
claim_id (string): Id of the claim created e.g. UUID.
body (ClusterInfoDetails): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
ServiceManagementResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceId}/claims/{claimId}/sandBoxStart')
.http_method(HttpMethodEnum.PUT)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceId')
.value(service_id)
.should_encode(True))
.template_param(Parameter()
.key('claimId')
.value(claim_id)
.should_encode(True))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceManagementResult.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Unexpected error.', EdgeServiceOnboardingResultErrorException)
).execute()
def start_service_publishing(self,
account_name,
service_name,
version,
correlation_id=None):
"""Does a PUT request to /v1/services/{serviceName}/{version}/publish.
Start publishing a service. On successful completion, the service's
status will be marked as Published.
Args:
account_name (string): User account name.
service_name (string): Name of the service e.g. any sub string of
serviceName.
version (string): Version of service which is to be published.
correlation_id (string, optional): TODO: type description here.
Returns:
ServiceManagementResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceName}/{version}/publish')
.http_method(HttpMethodEnum.PUT)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceManagementResult.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Unexpected error.', EdgeServiceOnboardingResultErrorException)
).execute()
def mark_service_as_ready_for_public_use(self,
account_name,
service_name,
version,
correlation_id=None):
"""Does a PUT request to /v1/services/{serviceName}/{version}/readyToPublicUse.
Start the process to change a service's status to "Ready to Use". On
success, the service's status will be changed to "Ready to Use". Only a
ready-to-use service can be deployed in a production environment.
Args:
account_name (string): User account name.
service_name (string): Name of the service e.g. any sub string of
serviceName.
version (string): Version of the service which is already
certified and is ready for public use.
correlation_id (string, optional): TODO: type description here.
Returns:
ServiceManagementResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceName}/{version}/readyToPublicUse')
.http_method(HttpMethodEnum.PUT)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceManagementResult.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Unexpected error.', EdgeServiceOnboardingResultErrorException)
).execute()
def upload_service_workload_file(self,
account_name,
service_name,
version,
category_type,
category_name,
payload,
correlation_id=None,
category_version=None):
"""Does a POST request to /v1/files/{serviceName}/{version}/uploadAndValidate.
Upload a workload payload/package to the MEC platform.
Args:
account_name (string): User account name.
service_name (string): Service name to which the file is going to
be associated.
version (string): Version of the service being used.
category_type (CategoryTypeEnum): Type of the file being
uploaded.
category_name (string): The `workloadName` used during service
creation.
payload (typing.BinaryIO): Payload/file which is to be uploaded
should be provided in formData.
correlation_id (string, optional): TODO: type description here.
category_version (string, optional): Mandatory only for a service
file; not required for workload and workflow files.
Returns:
ServiceFile: Response from the API. Upload success.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/files/{serviceName}/{version}/uploadAndValidate')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.query_param(Parameter()
.key('categoryType')
.value(category_type))
.query_param(Parameter()
.key('categoryName')
.value(category_name))
.multipart_param(Parameter()
.key('payload')
.value(payload)
.default_content_type('application/octet-stream'))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.query_param(Parameter()
.key('categoryVersion')
.value(category_version))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceFile.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def list_services(self,
account_name,
correlation_id=None,
name=None,
q=None,
limit=None,
off_set=None,
sort_key='createdDate',
sort_dir=None,
details_flag=True):
"""Does a GET request to /v1/services.
Fetch all organizational services in the platform.
Args:
account_name (string): User account name.
correlation_id (string, optional): TODO: type description here.
name (string, optional): Name of the service whose information
needs to be fetched.
q (string, optional): Use the colon (:) character to separate
multiple values, e.g.
type=myService:workloads.packageType=Helm,YAML:state=DRAFTED,VALIDATION_COMPLETED.
limit (long|int, optional): Number of items to return.
off_set (long|int, optional): Id of the last response value in the
previous list.
sort_key (string, optional): Sorts the response by an attribute.
Default is createdDate.
sort_dir (SortDirectionEnum, optional): Sorts the response. Use
asc for ascending or desc for descending order. The default is
desc.
details_flag (bool, optional): Defaults to true. If true, all
details are returned.
Returns:
Services: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services')
.http_method(HttpMethodEnum.GET)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.query_param(Parameter()
.key('name')
.value(name))
.query_param(Parameter()
.key('q')
.value(q))
.query_param(Parameter()
.key('limit')
.value(limit))
.query_param(Parameter()
.key('offSet')
.value(off_set))
.query_param(Parameter()
.key('sortKey')
.value(sort_key))
.query_param(Parameter()
.key('sortDir')
.value(sort_dir))
.query_param(Parameter()
.key('detailsFlag')
.value(details_flag))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Services.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not Found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def list_service_details(self,
account_name,
service_name,
version,
correlation_id=None):
"""Does a GET request to /v1/services/{serviceName}/{version}.
Fetch a service's details within the user's organization using the
service name and version.
Args:
account_name (string): User account name.
service_name (string): Name of the service whose information needs
to be fetched.
version (string): Version of service whose information needs to be
fetched.
correlation_id (string, optional): TODO: type description here.
Returns:
Service: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceName}/{version}')
.http_method(HttpMethodEnum.GET)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Service.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not Found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Unexpected error.', EdgeServiceOnboardingResultErrorException)
).execute()
def start_service_onboarding(self,
account_name,
service_name,
version,
correlation_id=None):
"""Does a PUT request to /v1/services/{serviceName}/{version}/startOnboarding.
Start the service onboarding process to kick off service artifact
validation and make the service ready for sandbox testing. On
successful completion of this process, the system will generate claims
for each selected cloud provider, which the user can use to start
sandbox testing.
Args:
account_name (string): User account name.
service_name (string): Name of the service which is to be
onboarded.
version (string): Version of service which is to be onboarded.
correlation_id (string, optional): TODO: type description here.
Returns:
ServiceManagementResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/services/{serviceName}/{version}/startOnboarding')
.http_method(HttpMethodEnum.PUT)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('serviceName')
.value(service_name)
.should_encode(True))
.template_param(Parameter()
.key('version')
.value(version)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ServiceManagementResult.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('403', 'Forbidden.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('415', 'Unsupported media type.', EdgeServiceOnboardingResultErrorException)
.local_error('429', 'Too many requests.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/controllers/service_onboarding_controller.py | filename: service_onboarding_controller.py]
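# Usage sketch for ServiceOnboardingController (illustrative; not part of the
# generated package). The call order below is one plausible lifecycle implied
# by the endpoints - register, onboard, certify, publish - and `config`, the
# Service payload keys, and all identifiers are hypothetical placeholders.
onboarding = ServiceOnboardingController(config)
created = onboarding.register_service(
    account_name='0000123456-00001',
    body=Service.from_dictionary({'name': 'demo-svc', 'version': '1.0.0'})
)
onboarding.start_service_onboarding('0000123456-00001', 'demo-svc', '1.0.0')
onboarding.stop_service_testing('0000123456-00001', 'demo-svc', '1.0.0')  # certify
onboarding.start_service_publishing('0000123456-00001', 'demo-svc', '1.0.0')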
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.configuration import Server
from verizon5gmecvnspapi.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon5gmecvnspapi.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from verizon5gmecvnspapi.models.repository import Repository
from verizon5gmecvnspapi.models.edge_service_onboarding_delete_result import EdgeServiceOnboardingDeleteResult
from verizon5gmecvnspapi.exceptions.edge_service_onboarding_result_error_exception import EdgeServiceOnboardingResultErrorException
class RepositoriesController(BaseController):
"""A Controller to access Endpoints in the verizon5gmecvnspapi API."""
def __init__(self, config):
super(RepositoriesController, self).__init__(config)
def create_repository(self,
account_name,
body,
correlation_id=None):
"""Does a POST request to /v1/config/repository.
Create a repository within the user's organization.
Args:
account_name (string): User account name.
body (Repository): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
Repository: Response from the API. Created.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/config/repository')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Repository.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def list_repositories(self,
account_name,
correlation_id=None,
mtype=None):
"""Does a GET request to /v1/config/repository.
Get all repositories in the platform.
Args:
account_name (string): User account name.
correlation_id (string, optional): TODO: type description here.
mtype (string, optional): Repository type.
Returns:
list of Repository: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/config/repository')
.http_method(HttpMethodEnum.GET)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.query_param(Parameter()
.key('type')
.value(mtype))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Repository.from_dictionary)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def delete_repository(self,
account_name,
repository_name,
correlation_id=None):
"""Does a DELETE request to /v1/config/repository/{repositoryName}.
Delete the repository.
Args:
account_name (string): User account name.
repository_name (string): Name of the repository which is about to
be deleted.
correlation_id (string, optional): TODO: type description here.
Returns:
EdgeServiceOnboardingDeleteResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/config/repository/{repositoryName}')
.http_method(HttpMethodEnum.DELETE)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('repositoryName')
.value(repository_name)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(EdgeServiceOnboardingDeleteResult.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/controllers/repositories_controller.py | filename: repositories_controller.py]
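# Usage sketch for RepositoriesController (illustrative; not part of the
# generated package). `config` and the Repository payload keys are
# hypothetical placeholders.
repos = RepositoriesController(config)
repo = repos.create_repository(
    account_name='0000123456-00001',
    body=Repository.from_dictionary({'name': 'my-helm-repo'})
)
all_repos = repos.list_repositories('0000123456-00001', mtype='GIT')
repos.delete_repository('0000123456-00001', repository_name='my-helm-repo')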
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.configuration import Server
from verizon5gmecvnspapi.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon5gmecvnspapi.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from verizon5gmecvnspapi.models.category import Category
from verizon5gmecvnspapi.models.tag import Tag
from verizon5gmecvnspapi.exceptions.edge_service_onboarding_result_error_exception import EdgeServiceOnboardingResultErrorException
class ServiceMetadataController(BaseController):
"""A Controller to access Endpoints in the verizon5gmecvnspapi API."""
def __init__(self, config):
super(ServiceMetadataController, self).__init__(config)
def create_service_category(self,
account_name,
body,
correlation_id=None):
"""Does a POST request to /v1/category.
Create a new category within user's organization.
Args:
account_name (string): User account name.
body (list of Category): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
list of Category: Response from the API. Created.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/category')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Category.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def create_service_tag(self,
account_name,
body,
correlation_id=None):
"""Does a POST request to /v1/tag/.
Create a new Tag within user's organization.
Args:
account_name (string): User account name.
body (list of Tag): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
list of Tag: Response from the API. Created.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/tag/')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Tag.from_dictionary)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('415', 'Unsupported media type.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/controllers/service_metadata_controller.py | filename: service_metadata_controller.py]
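# Usage sketch for ServiceMetadataController (illustrative; not part of the
# generated package). Both endpoints take a list of models as the body;
# `config` and the payload keys are hypothetical placeholders.
metadata = ServiceMetadataController(config)
categories = metadata.create_service_category(
    account_name='0000123456-00001',
    body=[Category.from_dictionary({'name': 'analytics'})]
)
tags = metadata.create_service_tag(
    account_name='0000123456-00001',
    body=[Tag.from_dictionary({'name': 'gpu'})]
)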
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.configuration import Server
from verizon5gmecvnspapi.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon5gmecvnspapi.http.http_method_enum import HttpMethodEnum
from verizon5gmecvnspapi.models.o_auth_token import OAuthToken
from verizon5gmecvnspapi.exceptions.o_auth_provider_exception import OAuthProviderException
class OAuthAuthorizationController(BaseController):
"""A Controller to access Endpoints in the verizon5gmecvnspapi API."""
def __init__(self, config):
super(OAuthAuthorizationController, self).__init__(config)
def request_token(self,
authorization,
scope=None,
_optional_form_parameters=None):
"""Does a POST request to /oauth2/token.
Create a new OAuth 2 token.
Args:
authorization (string): Authorization header in Basic auth format
scope (string, optional): Requested scopes as a space-delimited
list.
_optional_form_parameters (Array, optional): Additional optional
form parameters supported by this endpoint.
Returns:
OAuthToken: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.OAUTH_SERVER)
.path('/oauth2/token')
.http_method(HttpMethodEnum.POST)
.form_param(Parameter()
.key('grant_type')
.value('client_credentials'))
.header_param(Parameter()
.key('Authorization')
.value(authorization))
.form_param(Parameter()
.key('scope')
.value(scope))
.header_param(Parameter()
.key('content-type')
.value('application/x-www-form-urlencoded'))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.additional_form_params(_optional_form_parameters)
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(OAuthToken.from_dictionary)
.local_error('400', 'OAuth 2 provider returned an error.', OAuthProviderException)
.local_error('401', 'OAuth 2 provider says client authentication failed.', OAuthProviderException)
).execute()
[package: Apiamtic-python | path: /Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/controllers/o_auth_authorization_controller.py | filename: o_auth_authorization_controller.py]
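# Usage sketch for OAuthAuthorizationController (illustrative; not part of
# the generated package). The endpoint wires in the client_credentials grant
# above, so the caller only supplies an HTTP Basic Authorization header; the
# client id/secret and scope values are placeholders.
import base64

creds = base64.b64encode(b'MY_CLIENT_ID:MY_CLIENT_SECRET').decode()
oauth = OAuthAuthorizationController(config)
token = oauth.request_token(
    authorization='Basic ' + creds,
    scope='read write'  # hypothetical space-delimited scopes
)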
from verizon5gmecvnspapi.api_helper import APIHelper
from verizon5gmecvnspapi.configuration import Server
from verizon5gmecvnspapi.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon5gmecvnspapi.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from verizon5gmecvnspapi.models.edge_service_onboarding_delete_result import EdgeServiceOnboardingDeleteResult
from verizon5gmecvnspapi.models.csp_profile import CSPProfile
from verizon5gmecvnspapi.models.csp_profile_data import CSPProfileData
from verizon5gmecvnspapi.exceptions.edge_service_onboarding_result_error_exception import EdgeServiceOnboardingResultErrorException
class CSPProfilesController(BaseController):
"""A Controller to access Endpoints in the verizon5gmecvnspapi API."""
def __init__(self, config):
super(CSPProfilesController, self).__init__(config)
def remove_cloud_credential(self,
account_name,
id,
correlation_id=None):
"""Does a DELETE request to /v1/cspProfiles/{id}.
Remove a cloud credential from user's organization.
Args:
account_name (string): User account name.
id (string): CSP Profile Id.
correlation_id (string, optional): TODO: type description here.
Returns:
EdgeServiceOnboardingDeleteResult: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/cspProfiles/{id}')
.http_method(HttpMethodEnum.DELETE)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.template_param(Parameter()
.key('id')
.value(id)
.should_encode(True))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(EdgeServiceOnboardingDeleteResult.from_dictionary)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not Found.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
).execute()
def create_cloud_credential(self,
account_name,
body,
correlation_id=None):
"""Does a POST request to /v1/cspProfiles/.
Create a new cloud credential within user's organization.
Args:
account_name (string): User account name.
body (CSPProfile): TODO: type description here.
correlation_id (string, optional): TODO: type description here.
Returns:
CSPProfile: Response from the API. Created.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/cspProfiles/')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(CSPProfile.from_dictionary)
.local_error('400', 'Bad Request.', EdgeServiceOnboardingResultErrorException)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('403', 'Forbidden.', EdgeServiceOnboardingResultErrorException)
.local_error('429', 'Too many requests.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Forbidden.', EdgeServiceOnboardingResultErrorException)
).execute()
def fetch_cloud_credential_details(self,
account_name,
correlation_id=None,
q=None,
limit=None,
off_set=None):
"""Does a GET request to /v1/cspProfiles/.
Fetch available cloud credentials within the user's organization.
Args:
account_name (string): User account name.
correlation_id (string, optional): TODO: type description here.
q (string, optional): Use the colon (:) character to separate
multiple query params, e.g.
type=AWS:awsCspProfile.credType=ACCESS_KEY,ROLE_ARN:state=UNVERIFIED,VERIFIED.
limit (long|int, optional): Number of items to return.
off_set (long|int, optional): ID of the last response value in the
previous list.
Returns:
CSPProfileData: Response from the API. OK.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVICES)
.path('/v1/cspProfiles/')
.http_method(HttpMethodEnum.GET)
.header_param(Parameter()
.key('AccountName')
.value(account_name))
.header_param(Parameter()
.key('correlationId')
.value(correlation_id))
.query_param(Parameter()
.key('q')
.value(q))
.query_param(Parameter()
.key('limit')
.value(limit))
.query_param(Parameter()
.key('offSet')
.value(off_set))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(CSPProfileData.from_dictionary)
.local_error('401', 'Unauthorized.', EdgeServiceOnboardingResultErrorException)
.local_error('403', 'Forbidden.', EdgeServiceOnboardingResultErrorException)
.local_error('404', 'Not found.', EdgeServiceOnboardingResultErrorException)
.local_error('429', 'Too many requests.', EdgeServiceOnboardingResultErrorException)
.local_error('500', 'Internal Server Error.', EdgeServiceOnboardingResultErrorException)
.local_error('default', 'Forbidden.', EdgeServiceOnboardingResultErrorException)
).execute()
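# A minimal usage sketch (illustrative, not generated SDK code): controllers
# are normally reached through the SDK's client object rather than built
# directly. The `csp_profiles` accessor name below is an assumption; check
# the client class of your SDK build.
#
#     profiles = client.csp_profiles.fetch_cloud_credential_details(
#         account_name='my-account', q='type=AWS', limit=10)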
|
Apiamtic-python
|
/Apiamtic_python-1.6.9-py3-none-any.whl/verizon5gmecvnspapi/controllers/csp_profiles_controller.py
|
csp_profiles_controller.py
|
from apimatic_core.configurations.global_configuration import GlobalConfiguration
from apimatic_core.decorators.lazy_property import LazyProperty
from swaggerpetstore.configuration import Configuration
from swaggerpetstore.controllers.base_controller import BaseController
from swaggerpetstore.configuration import Environment
from swaggerpetstore.http.auth.custom_authentication import CustomAuthentication
from swaggerpetstore.controllers.pet_controller import PetController
from swaggerpetstore.controllers.store_controller import StoreController
from swaggerpetstore.controllers.user_controller import UserController
class SwaggerpetstoreClient(object):
@LazyProperty
def pet(self):
return PetController(self.global_configuration)
@LazyProperty
def store(self):
return StoreController(self.global_configuration)
@LazyProperty
def user(self):
return UserController(self.global_configuration)
def __init__(self, http_client_instance=None,
override_http_client_configuration=False, http_call_back=None,
timeout=60, max_retries=0, backoff_factor=2,
retry_statuses=[408, 413, 429, 500, 502, 503, 504, 521, 522, 524],
retry_methods=['GET', 'PUT'],
environment=Environment.PRODUCTION, password='TODO: Replace',
config=None):
if config is None:
self.config = Configuration(
http_client_instance=http_client_instance,
override_http_client_configuration=override_http_client_configuration,
http_call_back=http_call_back,
timeout=timeout,
max_retries=max_retries,
backoff_factor=backoff_factor,
retry_statuses=retry_statuses,
retry_methods=retry_methods,
environment=environment,
password=password)
else:
self.config = config
self.global_configuration = GlobalConfiguration(self.config)\
.global_errors(BaseController.global_errors())\
.base_uri_executor(self.config.get_base_uri)\
.user_agent(BaseController.user_agent(), BaseController.user_agent_parameters())
self.initialize_auth_managers(self.global_configuration)
self.global_configuration = self.global_configuration.auth_managers(self.auth_managers)
def initialize_auth_managers(self, global_config):
http_client_config = global_config.get_http_client_configuration()
self.auth_managers = { key: None for key in ['global']}
self.auth_managers['global'] = CustomAuthentication(http_client_config.password, http_client_config)
return self.auth_managers
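# A minimal, runnable sketch (illustrative, not part of the generated
# module): construct the client with defaults. 'secret' is a placeholder
# password, not a real credential; controller properties are lazy, so each
# one is only instantiated on first access.
if __name__ == '__main__':
    client = SwaggerpetstoreClient(password='secret', timeout=30)
    pet_controller = client.pet      # created on first access
    store_controller = client.store  # independent lazy property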
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/swaggerpetstore_client.py
|
swaggerpetstore_client.py
|
from enum import Enum
from apimatic_core.http.configurations.http_client_configuration import HttpClientConfiguration
from apimatic_requests_client_adapter.requests_client import RequestsClient
class Environment(Enum):
"""An enum for SDK environments"""
PRODUCTION = 0
class Server(Enum):
"""An enum for API servers"""
SERVER1 = 0
SERVER2 = 1
AUTH_SERVER = 2
class Configuration(HttpClientConfiguration):
"""A class used for configuring the SDK by a user.
"""
@property
def environment(self):
return self._environment
@property
def password(self):
return self._password
def __init__(
self, http_client_instance=None,
override_http_client_configuration=False, http_call_back=None,
timeout=60, max_retries=0, backoff_factor=2,
retry_statuses=[408, 413, 429, 500, 502, 503, 504, 521, 522, 524],
retry_methods=['GET', 'PUT'], environment=Environment.PRODUCTION,
password='TODO: Replace'
):
super().__init__(http_client_instance, override_http_client_configuration, http_call_back, timeout, max_retries,
backoff_factor, retry_statuses, retry_methods)
# Current API environment
self._environment = environment
# TODO: Replace
self._password = password
# The Http Client to use for making requests.
super().set_http_client(self.create_http_client())
def clone_with(self, http_client_instance=None,
override_http_client_configuration=None, http_call_back=None,
timeout=None, max_retries=None, backoff_factor=None,
retry_statuses=None, retry_methods=None, environment=None,
password=None):
http_client_instance = http_client_instance or super().http_client_instance
override_http_client_configuration = override_http_client_configuration or super().override_http_client_configuration
http_call_back = http_call_back or super().http_callback
timeout = timeout or super().timeout
max_retries = max_retries or super().max_retries
backoff_factor = backoff_factor or super().backoff_factor
retry_statuses = retry_statuses or super().retry_statuses
retry_methods = retry_methods or super().retry_methods
environment = environment or self.environment
password = password or self.password
return Configuration(
http_client_instance=http_client_instance,
override_http_client_configuration=override_http_client_configuration,
http_call_back=http_call_back, timeout=timeout,
max_retries=max_retries, backoff_factor=backoff_factor,
retry_statuses=retry_statuses, retry_methods=retry_methods,
environment=environment, password=password
)
def create_http_client(self):
return RequestsClient(
timeout=super().timeout, max_retries=super().max_retries,
backoff_factor=super().backoff_factor, retry_statuses=super().retry_statuses,
retry_methods=super().retry_methods,
http_client_instance=super().http_client_instance,
override_http_client_configuration=super().override_http_client_configuration,
response_factory=super().http_response_factory
)
# All the environments the SDK can run in
environments = {
Environment.PRODUCTION: {
Server.SERVER1: 'https://petstore.swagger.io/v2',
Server.SERVER2: 'http://petstore.swagger.io/v2',
Server.AUTH_SERVER: 'https://petstore.swagger.io/oauth'
}
}
def get_base_uri(self, server=Server.SERVER1):
"""Generates the appropriate base URI for the environment and the
server.
Args:
server (Configuration.Server): The server enum for which the base
URI is required.
Returns:
String: The base URI.
"""
return self.environments[self.environment][server]
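# A minimal, runnable sketch (illustrative): resolve base URIs per server
# enum and derive a modified copy; clone_with keeps every setting that is
# not explicitly overridden.
if __name__ == '__main__':
    config = Configuration(timeout=30)
    print(config.get_base_uri(Server.AUTH_SERVER))  # https://petstore.swagger.io/oauth
    patient = config.clone_with(timeout=120)        # same settings, longer timeout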
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/configuration.py
|
configuration.py
|
from swaggerpetstore.api_helper import APIHelper
from swaggerpetstore.models.category import Category
from swaggerpetstore.models.tag import Tag
class Pet(object):
"""Implementation of the 'Pet' model.
TODO: type model description here.
Attributes:
id (long|int): TODO: type description here.
category (Category): TODO: type description here.
name (string): TODO: type description here.
photo_urls (list of string): TODO: type description here.
tags (list of Tag): TODO: type description here.
status (StatusEnum): pet status in the store
"""
# Create a mapping from Model property names to API property names
_names = {
"name": 'name',
"photo_urls": 'photoUrls',
"id": 'id',
"category": 'category',
"tags": 'tags',
"status": 'status'
}
_optionals = [
'id',
'category',
'tags',
'status',
]
def __init__(self,
name=None,
photo_urls=None,
id=APIHelper.SKIP,
category=APIHelper.SKIP,
tags=APIHelper.SKIP,
status=APIHelper.SKIP):
"""Constructor for the Pet class"""
# Initialize members of the class
if id is not APIHelper.SKIP:
self.id = id
if category is not APIHelper.SKIP:
self.category = category
self.name = name
self.photo_urls = photo_urls
if tags is not APIHelper.SKIP:
self.tags = tags
if status is not APIHelper.SKIP:
self.status = status
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get("name") if dictionary.get("name") else None
photo_urls = dictionary.get("photoUrls") if dictionary.get("photoUrls") else None
id = dictionary.get("id") if dictionary.get("id") else APIHelper.SKIP
category = Category.from_dictionary(dictionary.get('category')) if 'category' in dictionary.keys() else APIHelper.SKIP
tags = None
if dictionary.get('tags') is not None:
tags = [Tag.from_dictionary(x) for x in dictionary.get('tags')]
else:
tags = APIHelper.SKIP
status = dictionary.get("status") if dictionary.get("status") else APIHelper.SKIP
# Return an object of this model
return cls(name,
photo_urls,
id,
category,
tags,
status)
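# A minimal, runnable sketch (illustrative): camelCase payload keys such as
# `photoUrls` map onto snake_case attributes, and absent optional keys are
# never set on the instance because the constructor receives APIHelper.SKIP.
if __name__ == '__main__':
    payload = {'name': 'rex', 'photoUrls': ['https://example.com/rex.png'],
               'status': 'available'}
    pet = Pet.from_dictionary(payload)
    print(pet.name, pet.photo_urls, pet.status)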
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/models/pet.py
|
pet.py
|
from swaggerpetstore.api_helper import APIHelper
class ApiResponse(object):
"""Implementation of the 'ApiResponse' model.
TODO: type model description here.
Attributes:
code (int): TODO: type description here.
mtype (string): TODO: type description here.
message (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"code": 'code',
"mtype": 'type',
"message": 'message'
}
_optionals = [
'code',
'mtype',
'message',
]
def __init__(self,
code=APIHelper.SKIP,
mtype=APIHelper.SKIP,
message=APIHelper.SKIP):
"""Constructor for the ApiResponse class"""
# Initialize members of the class
if code is not APIHelper.SKIP:
self.code = code
if mtype is not APIHelper.SKIP:
self.mtype = mtype
if message is not APIHelper.SKIP:
self.message = message
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
code = dictionary.get("code") if dictionary.get("code") else APIHelper.SKIP
mtype = dictionary.get("type") if dictionary.get("type") else APIHelper.SKIP
message = dictionary.get("message") if dictionary.get("message") else APIHelper.SKIP
# Return an object of this model
return cls(code,
mtype,
message)
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/models/api_response.py
|
api_response.py
|
from swaggerpetstore.api_helper import APIHelper
class User(object):
"""Implementation of the 'User' model.
TODO: type model description here.
Attributes:
id (long|int): TODO: type description here.
username (string): TODO: type description here.
first_name (string): TODO: type description here.
last_name (string): TODO: type description here.
email (string): TODO: type description here.
password (string): TODO: type description here.
phone (string): TODO: type description here.
user_status (int): User Status
"""
# Create a mapping from Model property names to API property names
_names = {
"id": 'id',
"username": 'username',
"first_name": 'firstName',
"last_name": 'lastName',
"email": 'email',
"password": 'password',
"phone": 'phone',
"user_status": 'userStatus'
}
_optionals = [
'id',
'username',
'first_name',
'last_name',
'email',
'password',
'phone',
'user_status',
]
def __init__(self,
id=APIHelper.SKIP,
username=APIHelper.SKIP,
first_name=APIHelper.SKIP,
last_name=APIHelper.SKIP,
email=APIHelper.SKIP,
password=APIHelper.SKIP,
phone=APIHelper.SKIP,
user_status=APIHelper.SKIP):
"""Constructor for the User class"""
# Initialize members of the class
if id is not APIHelper.SKIP:
self.id = id
if username is not APIHelper.SKIP:
self.username = username
if first_name is not APIHelper.SKIP:
self.first_name = first_name
if last_name is not APIHelper.SKIP:
self.last_name = last_name
if email is not APIHelper.SKIP:
self.email = email
if password is not APIHelper.SKIP:
self.password = password
if phone is not APIHelper.SKIP:
self.phone = phone
if user_status is not APIHelper.SKIP:
self.user_status = user_status
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get("id") if dictionary.get("id") else APIHelper.SKIP
username = dictionary.get("username") if dictionary.get("username") else APIHelper.SKIP
first_name = dictionary.get("firstName") if dictionary.get("firstName") else APIHelper.SKIP
last_name = dictionary.get("lastName") if dictionary.get("lastName") else APIHelper.SKIP
email = dictionary.get("email") if dictionary.get("email") else APIHelper.SKIP
password = dictionary.get("password") if dictionary.get("password") else APIHelper.SKIP
phone = dictionary.get("phone") if dictionary.get("phone") else APIHelper.SKIP
user_status = dictionary.get("userStatus") if dictionary.get("userStatus") else APIHelper.SKIP
# Return an object of this model
return cls(id,
username,
first_name,
last_name,
email,
password,
phone,
user_status)
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/models/user.py
|
user.py
|
from swaggerpetstore.api_helper import APIHelper
class Order(object):
"""Implementation of the 'Order' model.
TODO: type model description here.
Attributes:
id (long|int): TODO: type description here.
pet_id (long|int): TODO: type description here.
quantity (int): TODO: type description here.
ship_date (datetime): TODO: type description here.
status (Status1Enum): Order Status
complete (bool): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"id": 'id',
"pet_id": 'petId',
"quantity": 'quantity',
"ship_date": 'shipDate',
"status": 'status',
"complete": 'complete'
}
_optionals = [
'id',
'pet_id',
'quantity',
'ship_date',
'status',
'complete',
]
def __init__(self,
id=APIHelper.SKIP,
pet_id=APIHelper.SKIP,
quantity=APIHelper.SKIP,
ship_date=APIHelper.SKIP,
status=APIHelper.SKIP,
complete=APIHelper.SKIP):
"""Constructor for the Order class"""
# Initialize members of the class
if id is not APIHelper.SKIP:
self.id = id
if pet_id is not APIHelper.SKIP:
self.pet_id = pet_id
if quantity is not APIHelper.SKIP:
self.quantity = quantity
if ship_date is not APIHelper.SKIP:
self.ship_date = APIHelper.RFC3339DateTime(ship_date) if ship_date else None
if status is not APIHelper.SKIP:
self.status = status
if complete is not APIHelper.SKIP:
self.complete = complete
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get("id") if dictionary.get("id") else APIHelper.SKIP
pet_id = dictionary.get("petId") if dictionary.get("petId") else APIHelper.SKIP
quantity = dictionary.get("quantity") if dictionary.get("quantity") else APIHelper.SKIP
ship_date = APIHelper.RFC3339DateTime.from_value(dictionary.get("shipDate")).datetime if dictionary.get("shipDate") else APIHelper.SKIP
status = dictionary.get("status") if dictionary.get("status") else APIHelper.SKIP
complete = dictionary.get("complete") if "complete" in dictionary.keys() else APIHelper.SKIP
# Return an object of this model
return cls(id,
pet_id,
quantity,
ship_date,
status,
complete)
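# A minimal, runnable sketch (illustrative): the `shipDate` string is parsed
# through APIHelper.RFC3339DateTime, so `ship_date` comes back as a
# date-time wrapper rather than a raw string; `complete` keeps its boolean.
if __name__ == '__main__':
    payload = {'id': 7, 'petId': 3, 'quantity': 2,
               'shipDate': '2023-01-15T10:30:00Z',
               'status': 'placed', 'complete': False}
    order = Order.from_dictionary(payload)
    print(order.pet_id, order.ship_date, order.complete)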
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/models/order.py
|
order.py
|
from swaggerpetstore.api_helper import APIHelper
from swaggerpetstore.configuration import Server
from swaggerpetstore.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from swaggerpetstore.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from swaggerpetstore.models.order import Order
from swaggerpetstore.exceptions.api_exception import APIException
class StoreController(BaseController):
"""A Controller to access Endpoints in the swaggerpetstore API."""
def __init__(self, config):
super(StoreController, self).__init__(config)
def place_order(self,
body):
"""Does a POST request to /store/order.
Place an order for a pet
Args:
body (Order): order placed for purchasing the pet
Returns:
Order: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/store/order')
.http_method(HttpMethodEnum.POST)
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Order.from_dictionary)
.local_error('400', 'Invalid Order', APIException)
).execute()
def get_inventory(self):
"""Does a GET request to /store/inventory.
Returns a map of status codes to quantities
Returns:
dict: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/store/inventory')
.http_method(HttpMethodEnum.GET)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
).execute()
def get_order_by_id(self,
order_id):
"""Does a GET request to /store/order/{orderId}.
For a valid response try integer IDs with value >= 1 and <= 10. Other
values will generate exceptions
Args:
order_id (long|int): ID of pet that needs to be fetched
Returns:
Order: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/store/order/{orderId}')
.http_method(HttpMethodEnum.GET)
.template_param(Parameter()
.key('orderId')
.value(order_id)
.should_encode(True))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Order.from_dictionary)
.local_error('400', 'Invalid ID supplied', APIException)
.local_error('404', 'Order not found', APIException)
).execute()
def delete_order(self,
order_id):
"""Does a DELETE request to /store/order/{orderId}.
For a valid response try IDs with a positive integer value.
Negative or non-integer values will generate API errors
Args:
order_id (long|int): ID of the order that needs to be deleted
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/store/order/{orderId}')
.http_method(HttpMethodEnum.DELETE)
.template_param(Parameter()
.key('orderId')
.value(order_id)
.should_encode(True))
.auth(Single('global'))
).execute()
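# A minimal usage sketch (illustrative; performs a real network call): reach
# this controller through the generated client and read the inventory map.
if __name__ == '__main__':
    from swaggerpetstore.swaggerpetstore_client import SwaggerpetstoreClient
    client = SwaggerpetstoreClient(password='secret')  # placeholder credential
    inventory = client.store.get_inventory()  # dict mapping status -> quantity
    print(inventory)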
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/controllers/store_controller.py
|
store_controller.py
|
from swaggerpetstore.api_helper import APIHelper
from swaggerpetstore.configuration import Server
from swaggerpetstore.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from swaggerpetstore.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from swaggerpetstore.models.user import User
from swaggerpetstore.exceptions.api_exception import APIException
class UserController(BaseController):
"""A Controller to access Endpoints in the swaggerpetstore API."""
def __init__(self, config):
super(UserController, self).__init__(config)
def create_users_with_array_input(self,
body):
"""Does a POST request to /user/createWithArray.
Creates list of users with given input array
Args:
body (list of User): List of user object
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/createWithArray')
.http_method(HttpMethodEnum.POST)
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
def get_user_by_name(self,
username):
"""Does a GET request to /user/{username}.
Get user by user name
Args:
username (string): The name that needs to be fetched. Use user1
for testing.
Returns:
User: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/{username}')
.http_method(HttpMethodEnum.GET)
.template_param(Parameter()
.key('username')
.value(username)
.should_encode(True))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(User.from_dictionary)
.local_error('400', 'Invalid username supplied', APIException)
.local_error('404', 'User not found', APIException)
).execute()
def delete_user(self,
username):
"""Does a DELETE request to /user/{username}.
This can only be done by the logged in user.
Args:
username (string): The name that needs to be deleted
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/{username}')
.http_method(HttpMethodEnum.DELETE)
.template_param(Parameter()
.key('username')
.value(username)
.should_encode(True))
.auth(Single('global'))
).execute()
def login_user(self,
username,
password):
"""Does a GET request to /user/login.
Logs user into the system
Args:
username (string): The user name for login
password (string): The password for login in clear text
Returns:
string: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/login')
.http_method(HttpMethodEnum.GET)
.query_param(Parameter()
.key('username')
.value(username))
.query_param(Parameter()
.key('password')
.value(password))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.local_error('400', 'Invalid username/password supplied', APIException)
).execute()
def create_users_with_list_input(self,
body):
"""Does a POST request to /user/createWithList.
Creates list of users with given input array
Args:
body (list of User): List of user object
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/createWithList')
.http_method(HttpMethodEnum.POST)
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
def update_user(self,
username,
body):
"""Does a PUT request to /user/{username}.
This can only be done by the logged in user.
Args:
username (string): name that needs to be updated
body (User): Updated user object
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/{username}')
.http_method(HttpMethodEnum.PUT)
.template_param(Parameter()
.key('username')
.value(username)
.should_encode(True))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
def logout_user(self):
"""Does a GET request to /user/logout.
Logs out current logged in user session
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user/logout')
.http_method(HttpMethodEnum.GET)
.auth(Single('global'))
).execute()
def create_user(self,
body):
"""Does a POST request to /user.
This can only be done by the logged in user.
Args:
body (User): Created user object
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/user')
.http_method(HttpMethodEnum.POST)
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
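# A minimal usage sketch (illustrative; performs real network calls): log in
# with the demo account mentioned in get_user_by_name's docstring, then log
# out. The password below is a placeholder.
if __name__ == '__main__':
    from swaggerpetstore.swaggerpetstore_client import SwaggerpetstoreClient
    client = SwaggerpetstoreClient(password='secret')
    session = client.user.login_user('user1', 'password1')  # returns a string
    client.user.logout_user()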
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/controllers/user_controller.py
|
user_controller.py
|
from deprecation import deprecated
from swaggerpetstore.api_helper import APIHelper
from swaggerpetstore.configuration import Server
from swaggerpetstore.utilities.file_wrapper import FileWrapper
from swaggerpetstore.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from swaggerpetstore.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from swaggerpetstore.models.api_response import ApiResponse
from swaggerpetstore.models.pet import Pet
from swaggerpetstore.exceptions.api_exception import APIException
class PetController(BaseController):
"""A Controller to access Endpoints in the swaggerpetstore API."""
def __init__(self, config):
super(PetController, self).__init__(config)
def inpet(self,
body):
"""Does a POST request to /pet.
Add a new pet to the store
Args:
body (Pet): Pet object that needs to be added to the store
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet')
.http_method(HttpMethodEnum.POST)
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
def upload_file(self,
pet_id,
additional_metadata=None,
file=None):
"""Does a POST request to /pet/{petId}/uploadImage.
uploads an image
Args:
pet_id (long|int): ID of pet to update
additional_metadata (string, optional): Additional data to pass to
server
file (typing.BinaryIO, optional): file to upload
Returns:
ApiResponse: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet/{petId}/uploadImage')
.http_method(HttpMethodEnum.POST)
.template_param(Parameter()
.key('petId')
.value(pet_id)
.should_encode(True))
.form_param(Parameter()
.key('additionalMetadata')
.value(additional_metadata))
.multipart_param(Parameter()
.key('file')
.value(file)
.default_content_type('application/octet-stream'))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(ApiResponse.from_dictionary)
).execute()
def update_an_pet(self,
body):
"""Does a PUT request to /pet.
Update an existing pet
Args:
body (Pet): Pet object that needs to be added to the store
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet')
.http_method(HttpMethodEnum.PUT)
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).execute()
def find_pet_in_the_status(self,
status):
"""Does a GET request to /pet/findByStatus.
Multiple status values can be provided with comma separated strings
Args:
status (list of Status2Enum): Status values that need to be
considered for filter
Returns:
list of Pet: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet/findByStatus')
.http_method(HttpMethodEnum.GET)
.query_param(Parameter()
.key('status')
.value(status))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Pet.from_dictionary)
.local_error('400', 'Invalid status value', APIException)
).execute()
@deprecated()
def find_pets_an_tags(self,
tags):
"""Does a GET request to /pet/findByTags.
Multiple tags can be provided with comma separated strings. Use tag1,
tag2, tag3 for testing.
Args:
tags (list of string): Tags to filter by
Returns:
list of Pet: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet/findByTags')
.http_method(HttpMethodEnum.GET)
.query_param(Parameter()
.key('tags')
.value(tags))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Pet.from_dictionary)
.local_error('400', 'Invalid tag value', APIException)
).execute()
def get_pet_by_id(self,
pet_id):
"""Does a GET request to /pet/{petId}.
Returns a single pet
Args:
pet_id (long|int): ID of pet to return
Returns:
Pet: Response from the API. successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet/{petId}')
.http_method(HttpMethodEnum.GET)
.template_param(Parameter()
.key('petId')
.value(pet_id)
.should_encode(True))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(Pet.from_dictionary)
.local_error('400', 'Invalid ID supplied', APIException)
.local_error('404', 'Pet not found', APIException)
).execute()
def delete_pet(self,
pet_id,
api_key=None):
"""Does a DELETE request to /pet/{petId}.
Deletes a pet
Args:
pet_id (long|int): Pet id to delete
api_key (string, optional): TODO: type description here.
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet/{petId}')
.http_method(HttpMethodEnum.DELETE)
.template_param(Parameter()
.key('petId')
.value(pet_id)
.should_encode(True))
.header_param(Parameter()
.key('api_key')
.value(api_key))
.auth(Single('global'))
).execute()
def update_pet_with_form(self,
pet_id,
name=None,
status=None):
"""Does a POST request to /pet/{petId}.
Updates a pet in the store with form data
Args:
pet_id (long|int): ID of pet that needs to be updated
name (string, optional): Updated name of the pet
status (string, optional): Updated status of the pet
Returns:
void: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.SERVER1)
.path('/pet/{petId}')
.http_method(HttpMethodEnum.POST)
.template_param(Parameter()
.key('petId')
.value(pet_id)
.should_encode(True))
.form_param(Parameter()
.key('name')
.value(name))
.form_param(Parameter()
.key('status')
.value(status))
.header_param(Parameter()
.key('content-type')
.value('application/x-www-form-urlencoded'))
.auth(Single('global'))
).execute()
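# A minimal usage sketch (illustrative; performs a real network call): upload
# an image using the FileWrapper helper this module already imports. The file
# path, pet id, and the FileWrapper(stream, content_type) signature are
# assumptions to verify against your SDK build.
if __name__ == '__main__':
    from swaggerpetstore.swaggerpetstore_client import SwaggerpetstoreClient
    client = SwaggerpetstoreClient(password='secret')  # placeholder credential
    with open('rex.png', 'rb') as image:
        result = client.pet.upload_file(
            pet_id=1,
            additional_metadata='profile photo',
            file=FileWrapper(image, 'image/png'))
        print(result.message)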
|
ApiamticPackage5
|
/ApiamticPackage5-1.5.6-py3-none-any.whl/swaggerpetstore/controllers/pet_controller.py
|
pet_controller.py
|
Apilisk
================================
Standalone rest client for Apiwatcher platform.
It can run testcases defined in Apiwatcher locally on your machine or on a
CI server.
Installation
=================================
Currently Apilisk is distributed via PyPI; more download options
are coming soon.
Linux
********************************
The best way is to create a virtual environment and then use *pip*.
.. code-block:: shell
virtualenv env
. env/bin/activate
pip install Apilisk
You must have libcurl installed.
OS X
*********************************
On Mac we suggest using *easy_install*, although *pip* should work as well.
.. code-block:: shell
sudo easy_install Apilisk
Run
==================================
At first you need to have a team in Apiwatcher, so sign up or log in.
Then create a project and some testcases, otherwise there is nothing to
run. :) Finally, you need credentials (a Client ID and a Client secret).
You can find these under team settings - create a new pair and either
download the configuration file for Apilisk or copy-paste the ``apilisk
init`` command, which will create the file for you.
.. code-block:: shell
apilisk init --client-secret SECRET --client-id ID --agent-id "My agent"
Example file:
.. code-block:: json
{
"host": "https://api2.apiwatcher.com",
"port": 443,
"client_id": "YOUR_CLIENT_ID",
"client_secret": "YOUR_CLIENT_SECRET",
"agent_id": "My local agent"
}
And now just run it :)
.. code-block:: shell
apilisk run -c apilisk.json -u -v 1 -d YOUR_DATASET_ID -p YOUR_PROJECT_HASH
|
Apilisk
|
/Apilisk-0.2.1.tar.gz/Apilisk-0.2.1/README.rst
|
README.rst
|
import json
import argparse
from os.path import expanduser
import apilisk.printer
from apilisk.runner import Runner
from apilisk.exceptions import ApiliskException
from apilisk.printer import eprint, vprint
from apilisk.apiwatcher_client import ApiwatcherClient
from apilisk.junit_formatter import JunitFormatter
def _check_args(args, parser):
if args.action == "init":
# Must all be filled
if (
args.client_id is None or
args.agent_id is None or
args.client_secret is None
):
eprint("Options --client-id, --client-secret and --agent-id must "
"be set for init action."
)
exit(1)
elif args.action == "run":
cfg = {}
if (args.client_id is not None or args.client_secret is not None
):
cfg = _get_config_data(
args.client_id, args.client_secret, args.agent_id
)
else:
try:
with open(args.config_file) as cfg_file:
cfg = json.load(cfg_file)
except IOError as e:
eprint("Could not open configuration file at {0}: {1}".format(
args.config_file, e.message
))
exit(1)
if args.project is None:
eprint("Project hash (-p) is mandatory for action 'run'")
exit(1)
return cfg
else:
eprint(
u"Unknown action {0}, allowed values are run or init".format(
args.action
))
parser.print_usage()
exit(1)
def _get_config_data(client_id, client_secret, name):
return {
"host": "https://api2.apiwatcher.com",
"port": 443,
"client_id": client_id,
"client_secret": client_secret,
"agent_id": name
}
def _create_config_file(data, filename):
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"action", default=None,
help="What to do - init or run."
)
parser.add_argument(
"--client-id", default=None, type=str,
help="Client id for init"
)
parser.add_argument(
"--client-secret", default=None, type=str,
help="Client id for init"
)
parser.add_argument(
"--agent-id", default="NOT SET", type=str,
help="Agent id for init"
)
parser.add_argument(
"-p", "--project", default=None,
help="Hash of the project."
)
parser.add_argument(
"-d", "--dataset", type=int,
help="Id of dataset to use. None if no dataset should be used."
)
parser.add_argument(
"-v", "--verbose", type=int, default=1,
help="0 = no output, 1 = what is being done, 2 = data"
)
parser.add_argument(
"-c", "--config-file",
default="{0}/.apilisk.json".format(expanduser("~")),
help="Path to configuration file."
)
parser.add_argument(
"-j", "--junit", help="Provide output in junit format.",
action="store_true"
)
parser.add_argument(
"-o", "--junit-output-file", help="Path to junit output file",
type=str, default="./output.xml"
)
parser.add_argument(
"-i", "--include-data", help="Insert data into results.",
action="store_true"
)
parser.add_argument(
"-u", "--upload", help="Upload data to platform.",
action="store_true"
)
args = parser.parse_args()
apilisk.printer.verbosity = args.verbose
cfg = _check_args(args, parser)
# Switch according to action
if args.action == "init":
_create_config_file(
_get_config_data(
args.client_id, args.client_secret, args.agent_id
),
args.config_file
)
elif args.action == "run":
try:
client = ApiwatcherClient(cfg)
project_cfg = client.get_project_config(args.project)
runner = Runner(project_cfg, args.dataset)
results = runner.run_project(
include_data=args.include_data, debug=True
)
if args.junit:
fmt = JunitFormatter(project_cfg, results)
fmt.to_file("./output.xml")
if args.upload:
client.upload_results(project_cfg, results)
except ApiliskException as e:
eprint(e.message)
exit(1)
|
Apilisk
|
/Apilisk-0.2.1.tar.gz/Apilisk-0.2.1/apilisk/command_line.py
|
command_line.py
|
from requests.exceptions import ConnectionError
from apiwatcher_pyclient.client import Client
from apiwatcher_pyclient.exceptions import ApiwatcherClientException
from apilisk.printer import vprint, Colors, eprint
from apilisk.exceptions import ObjectNotFound, ApiliskException
class ApiwatcherClient:
"""
Client using apiwatcher-pyclient to communicate with the platform
"""
def __init__(self, apilisk_cfg):
"""
Initialize and log in to platform
"""
try:
self.agent_id = apilisk_cfg["agent_id"]
self.client = Client(
apilisk_cfg["host"], apilisk_cfg["port"],
verify_certificate=True
)
vprint(
1, None,
"### Authorizing to {0} ... ".format(apilisk_cfg["host"]), True
)
self.client.authorize_client_credentials(
apilisk_cfg["client_id"], apilisk_cfg["client_secret"],
"private_agent"
)
vprint(
1, Colors.GREEN,
"\r### Authorizing to {0} ... OK".format(apilisk_cfg["host"])
)
except KeyError as e:
raise ApiliskException(
"Key {0} is missing in configuration file.".format(e.message)
)
except ApiwatcherClientException as e:
raise ApiliskException(
"Could not authenticate to Apiwatcher platform: {0}".format(
e.message
)
)
except ConnectionError as e:
raise ApiliskException(
"Could not connect to Apiwatcher platform: {0}".format(
e.message
)
)
def get_project_config(self, project_hash):
"""
Return configuration of a project
"""
vprint(
1, None,
"### Getting configuraton of project {0} ... ".format(project_hash),
True
)
rsp = self.client.get(
"/api/projects/{0}/configuration".format(project_hash)
)
if rsp.status_code == 404:
raise ObjectNotFound(
"Project with hash {0} has not been found".format(
project_hash
)
)
elif rsp.status_code != 200:
raise ApiliskException(
"Could not get configuration of project {0}: {1}".format(
project_hash, rsp.json()["message"]
)
)
vprint(
1, Colors.GREEN,
"\r### Getting configuraton of project {0} ... OK".format(project_hash)
)
cfg = rsp.json()["data"]
vprint(
2, Colors.GREEN,
"### Summary: {0} testcases, {1} requests, {2} datasets".format(
len(cfg["testcases"]), len(cfg["requests"]),
len(cfg["datasets"])
))
return cfg
def upload_results(self, config, results):
"""Upload data to platform"""
vprint(
1, None,
"### Uploading data to Apiwatcher platform ...", True
)
rsp = self.client.post(
"/api/projects/{0}/remote-results".format(results["project_hash"]),
data={
"agent_id": self.agent_id,
"configuration": config,
"results": results
}
)
if rsp.status_code == 201:
vprint(
1, Colors.GREEN,
"\r### Uploading data to Apiwatcher platform ... OK"
)
else:
eprint("### Upload failed with response code {0}".format(rsp.status_code))
|
Apilisk
|
/Apilisk-0.2.1.tar.gz/Apilisk-0.2.1/apilisk/apiwatcher_client.py
|
apiwatcher_client.py
|
import json
import copy
import pytz
import sys
from datetime import datetime
from apilisk.curl_caller import CurlCaller
from apilisk.printer import vprint, Colors
from apilisk.exceptions import ObjectNotFound, ApiliskException
from apiwatcher_pyclient.client import Client
class Runner(object):
def __init__(self, project_cfg, dataset_id):
"""
Initializes all the stuff
"""
self.project_hash = project_cfg["project_hash"]
self.project_name = project_cfg["project_name"]
self.testcases = {
str(item["id"]): item for item in project_cfg["testcases"]
}
self.requests = {
str(item["id"]): item for item in project_cfg["requests"]
}
self.dataset = None
if dataset_id is not None:
for dts in project_cfg["datasets"]:
if dts["id"] == dataset_id:
self.dataset = copy.deepcopy(dts)
if self.dataset is None:
raise ObjectNotFound(
u"Dataset with id {0} has not been found".format(
dataset_id
)
)
def run_project(self, debug=False, include_data=False):
"""
Runs all testcases from one project
"""
results = []
time_start = datetime.now()
total_count = len(self.testcases)
success_count = 0
failed_count = 0
vprint(
1, None,
u"## Starting project {0} ({1})".format(
self.project_name, self.project_hash
)
)
for tc_id in self.testcases:
res = self.run_one_testcase(tc_id, debug, include_data)
if res["status"] == "success":
success_count += 1
else:
failed_count += 1
results.append(res)
duration_sec = (datetime.now() - time_start).total_seconds()
if failed_count > 0:
vprint(
1, Colors.RED,
u"## Failed {0} testcases out of {1} in {2} sec.".format(
failed_count, total_count, duration_sec
)
)
else:
vprint(
1, Colors.GREEN, u"## Success in {0} sec".format(duration_sec)
)
return {
"project_hash": self.project_hash,
"total_count": total_count,
"success_count": success_count,
"failed_count": failed_count,
"duration_sec": duration_sec,
"results": results
}
def run_one_testcase(self, tc_id, debug=False, include_data=False):
"""
Runs a single testcase
"""
# Merge dataset variables and request variables
variables = {
"var": copy.deepcopy(
self.dataset["variables"]
) if self.dataset is not None else {},
"req": []
}
auth = self.testcases[tc_id]["authentication"]
status = "success"
results = []
time_start = datetime.now()
vprint(
1, None, u"# {0} ... ".format(
self.testcases[tc_id]["name"]
), True
)
for step in self.testcases[tc_id]["steps"]:
if step["action"] == "call_request":
caller = CurlCaller(
step["data"], variables, authentication=auth, debug=debug
)
result, req_var = caller.handle_and_get_report(
include_data=include_data
)
variables["req"].append(req_var)
results.append(result)
if result["status"] == "failed":
status = "failed"
break
if status == 'success':
vprint(
1, Colors.GREEN, u"\r# {0} ... SUCCESS".format(
self.testcases[tc_id]["name"]
)
)
else:
vprint(
1, Colors.RED, u"\r# {0} ... FAILED".format(
self.testcases[tc_id]["name"]
)
)
return {
"testcase_id": int(tc_id),
"steps_results": results,
"status": status,
"duration_sec": (datetime.now() - time_start).total_seconds()
}
|
Apilisk
|
/Apilisk-0.2.1.tar.gz/Apilisk-0.2.1/apilisk/runner.py
|
runner.py
|
import re
import copy
import json
from StringIO import StringIO
from datetime import datetime
from jsonschema import Draft4Validator, ValidationError
import pycurl
from apilisk.utils import substitute_variables_recursively
class PycurlErrorCodesEnum(object):
"""
Convert number to something human readable
https://curl.haxx.se/libcurl/c/libcurl-errors.html
"""
URL_MALFORMAT = 3
COULDNT_RESOLVE_HOST = 6
COULDNT_CONNECT = 7
HTTP_RETURNED_ERROR = 22
READ_ERROR = 26
OPERATION_TIMEDOUT = 28
SSL_CONNECT_ERROR = 35
TOO_MANY_REDIRECTS = 47
RANGE_ERROR = 33
class CurlCaller(object):
"""
Handler which handles single call request step
"""
def __init__(
self, data, variables, authentication, debug=False
):
"""
Initializes a new pycurl connection and sets all necessary options,
substituting variables into the request and authentication data first.
"""
self.variables = copy.deepcopy(variables)
self.debug_mode = debug
self.debug_data = {}
data_after_substitution = CurlCaller.assign_variables_to_request(
data, variables
)
data_after_substitution["url"] = CurlCaller._construct_url(
data_after_substitution["url"],
data_after_substitution["query_parameters"]
)
auth_after_substitution = None
if authentication:
auth_after_substitution = CurlCaller.assign_variables_to_request(
authentication, variables
)
self.validation = copy.deepcopy(data_after_substitution["validation"])
self._response_headers = {}
self._response_content_buffer = StringIO()
self.conn = pycurl.Curl()
# Defaults
self.conn.setopt(pycurl.FOLLOWLOCATION, True)
self.conn.setopt(pycurl.FAILONERROR, False)
self.conn.setopt(pycurl.NOSIGNAL, True)
self.conn.setopt(pycurl.NOPROGRESS, True)
self.conn.setopt(pycurl.SSL_VERIFYHOST, False)
self.conn.setopt(pycurl.SSL_VERIFYPEER, False)
self.conn.setopt(pycurl.HEADERFUNCTION, self._store_headers)
self.conn.setopt(pycurl.WRITEFUNCTION, self._response_content_buffer.write)
self.conn.setopt(pycurl.TIMEOUT, 30)
self.conn.setopt(pycurl.VERBOSE, False)
if auth_after_substitution and \
auth_after_substitution["type"] == "http_basic_auth":
self.conn.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY)
self.conn.setopt(pycurl.USERNAME, authentication["data"]["user"])
self.conn.setopt(
pycurl.PASSWORD, authentication["data"]["password"]
)
self.conn.setopt(pycurl.URL, data_after_substitution["url"])
if data_after_substitution["method"] == "get":
self.conn.setopt(pycurl.HTTPGET, True)
else:
if data_after_substitution["body_data"] is not None:
self.conn.setopt(
pycurl.POSTFIELDS,
data_after_substitution["body_data"]["data"]
)
self.conn.setopt(
pycurl.CUSTOMREQUEST,
data_after_substitution["method"].upper())
# Set headers
header_array = [
u"{0}: {1}".format(key, value)
for key, value in data_after_substitution["headers"].iteritems()
]
self.conn.setopt(pycurl.HTTPHEADER, header_array)
if self.debug_mode:
self.debug_data["original_data"] = copy.deepcopy(data)
self.debug_data["data_after_variable_substitution"] = \
copy.deepcopy(data_after_substitution)
self.debug_data["user_variables"] = copy.deepcopy(variables["var"])
self.debug_data["req_variables"] = copy.deepcopy(variables["req"])
@staticmethod
def _construct_url(url, query_params):
if len(query_params.keys()) == 0:
return url
query_list = []
for (key, value) in query_params.iteritems():
query_list.append(u"{0}={1}".format(key, value))
question_mark_pos = url.find("?")
if question_mark_pos == -1:
return(u"{0}?{1}".format(url, "&".join(query_list)))
elif question_mark_pos == len(url) - 1 or url[-1] == "&":
return(u"{0}{1}".format(url, "&".join(query_list)))
else:
return(u"{0}&{1}".format(url, "&".join(query_list)))
def _get_response_data_as_unicode(self):
"""
Method to convert response data based on charset received in response
header (content-type key).
Taken from pyCurl sample
Returns:
Decoded response as unicode object
"""
encoding = None
if "content-type" in self._response_headers:
content_type = self._response_headers["content-type"].lower()
match = re.search(r"charset=(\S+)", content_type)
if match:
encoding = match.group(1)
if encoding is None:
# Default encoding for HTML is iso-8859-1.
# Other content types may have different default encoding,
# or in case of binary data, may have no encoding at all.
encoding = "iso-8859-1"
# Decode using the encoding we figured out.
return self._response_content_buffer.getvalue().decode(encoding)
def handle_and_get_report(self, include_data=False):
"""
Calls the stuff and gets report for the call
"""
try:
self.conn.perform()
except pycurl.error as pex:
error_code = pex[0]
message = pex[1]
if error_code == PycurlErrorCodesEnum.HTTP_RETURNED_ERROR:
return self._get_report_from_response(
self.conn, include_data
)
else:
return self._get_report_from_error(message)
else:
return self._get_report_from_response(self.conn, include_data)
def _get_report_from_response(self, response, include_data=False):
response_content = self._get_response_data_as_unicode()
errors = []
if response.getinfo(response.RESPONSE_CODE) not in self.validation["return_codes"]:
errors.append(
{
"id": "wrong_status_code",
"message": u"Status code of response was {0}, but allowed status codes are {1}".format(
response.getinfo(response.RESPONSE_CODE),
",".join([str(x) for x in self.validation["return_codes"]])
)
}
)
body = None
try:
body = json.loads(response_content)
except ValueError as e:
body = response_content
if self.validation["schema"]:
errors.append(
{
"id": "not_json",
"message": (
"There is json schema set, but response "
"content is not a valid json document."
)
}
)
else:
if self.validation["schema"] and \
self.validation["schema"]["type"] == "json":
validator = Draft4Validator(self.validation["schema"]["data"])
validator.check_schema(self.validation["schema"]["data"])
try:
validator.validate(body)
except ValidationError:
errors.append(
{
"id": "not_valid",
"message": "Response is not valid against provided json schema",
}
)
report = {
"action": "call_request",
"data": {
"headers": self._response_headers,
"body": body if include_data else None,
"status_code": response.getinfo(
response.RESPONSE_CODE
),
"name_lookup_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.NAMELOOKUP_TIME)
),
"connect_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.CONNECT_TIME)
),
"app_connect_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.APPCONNECT_TIME)
),
"pre_transfer_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.PRETRANSFER_TIME)
),
"start_transfer_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.STARTTRANSFER_TIME)
),
"total_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.TOTAL_TIME)
),
"redirect_duration_ms": CurlCaller._convert_duration(
response.getinfo(response.REDIRECT_TIME)
)
},
"errors": errors,
"status": "failed" if len(errors) > 0 else "success"
}
try:
variables = json.loads(body)
        except Exception:
variables = body
if self.debug_mode:
self.debug_data["new_variables"] = variables
report["debug"] = copy.deepcopy(self.debug_data)
return report, variables
def _get_report_from_error(self, message):
report = {
"action": "call_request",
"status": "failed",
"data": None,
"errors": [
{
"id": "no_response",
"message": message,
"data": None
}
]
}
if self.debug_mode:
report["debug"] = copy.deepcopy(self.debug_data)
return report, {}
@staticmethod
def _convert_duration(duration):
"""
        Converts pycurl duration from seconds to milliseconds
Args:
duration: Float duration in seconds
Returns:
Int number
"""
return int(round(duration * 1000))
def _store_headers(self, header_line):
"""
Helper method to parse and store headers from curl
Args:
header_line: string with header
"""
header_line = header_line.decode("utf-8")
# Skip the first line
if ":" not in header_line:
return
# Break the header line into header name and value.
name, value = header_line.split(":", 1)
name = name.strip()
value = value.strip()
# Names are case insensitive
name = name.lower()
self._response_headers[name] = value
@staticmethod
def assing_variables_to_request(config, variables):
"""
Returns new configuration after variable substitution
"""
cfg = copy.deepcopy(config)
objects_with_allowed_vars = [
"url", "headers", "body_data", "query_parameters"
]
for key in objects_with_allowed_vars:
if cfg.get(key):
cfg[key] = substitute_variables_recursively(
config[key], variables
)
if config["validation"]["schema"]:
cfg["validation"]["schema"]["data"] = \
substitute_variables_recursively(
config["validation"]["schema"]["data"], variables
)
return cfg
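    # Illustrative: with variables {"user_id": "42"}, a URL configured with
    # that placeholder is substituted in the returned copy; the exact
    # placeholder syntax is defined by substitute_variables_recursively,
    # which is implemented elsewhere in apilisk.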
|
Apilisk
|
/Apilisk-0.2.1.tar.gz/Apilisk-0.2.1/apilisk/curl_caller.py
|
curl_caller.py
|
Apiwatcher python client
================================
This project aims to be a simple Python client for interacting with the
Apiwatcher platform.
It handles authentication against the platform's OAuth2 workflow and can
therefore be used as a base for more complex applications.
Installation
=============
The best way is to use *pip*.
.. code-block:: shell
pip install apiwatcher-pyclient
Usage
======
.. code-block:: python
from apiwatcher_pyclient.client import Client
cl = Client()
cl.authorize_client_credentials(
"your_client_id", "your_client_secret", scope="apilisk"
)
cl.post(
"/api/projects/xxx/testcase/123456/results",
{
"some": "data"
}
)
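
To authenticate as a regular user instead, the password grant can be used
(a minimal sketch; the client id, credentials and endpoint below are
placeholders):

.. code-block:: python

    from apiwatcher_pyclient.client import Client

    cl = Client()
    cl.authorize_password(
        "your_client_id", "user@example.com", "your_password"
    )
    response = cl.get("/api/projects")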
|
Apiwatcher-Pyclient
|
/Apiwatcher-Pyclient-0.1.3.tar.gz/Apiwatcher-Pyclient-0.1.3/README.rst
|
README.rst
|
import requests
from .exceptions import ApiwatcherClientException
class Client(object):
"""Simple wrapper around python requests solving authentication to
Apiwatcher platform.
"""
def __init__(
self,
api_host="https://api2.apiwatcher.com",
api_port=443,
verify_certificate=True,
timeout=None
):
"""Initialize the client.
:param api_host: Hostname where Apiwatcher api is running.
:type api_host: String
:param api_port: Port where Apiwatcher api is running.
:type api_port: Integer
:param verify_certificate: If true, call will fail in case of invalid
certificate
:type verify_certificate: Boolean
:param timeout: Timeout of any single request in seconds
        :type timeout: Number
"""
if not api_host.startswith("http"):
api_host = "http://{0}".format(api_host)
self.base_url = "{0}:{1}".format(api_host, api_port)
self.auth_data = None
self.token = None
self.verify_certificate = verify_certificate
self.timeout = timeout
def authorize_client_credentials(
self, client_id, client_secret=None, scope="private_agent"
):
"""Authorize to platform with client credentials
        This should be used if you possess a client_id/client_secret pair
        generated by the platform.
"""
self.auth_data = {
"grant_type": "client_credentials",
"scope": [ scope ],
"client_id": client_id,
"client_secret": client_secret
}
self._do_authorize()
def authorize_password(self, client_id, username, password):
"""Authorize to platform as regular user
You must provide a valid client_id (same as web application),
your password and your username. Username and password is not stored in
client but refresh token is stored. The only valid scope for this
authorization is "regular_user".
:param client_id: Valid client_id
:type client_id: String
:param username: User email
:type username: String
:param password: User password
:type password: String
"""
self.auth_data = {
"grant_type": "password",
"username": username,
"password": password,
"client_id": client_id,
"scope": ["regular_user"]
}
self._do_authorize()
def _do_authorize(self):
""" Perform the authorization
"""
if self.auth_data is None:
raise ApiwatcherClientException("You must provide authorization data.")
r = requests.post(
"{0}/api/token".format(self.base_url), json=self.auth_data,
verify=self.verify_certificate, timeout=self.timeout
)
if r.status_code == 401:
raise ApiwatcherClientException("Wrong credentials supplied: {0}".format(
r.json()["message"]
))
elif r.status_code != 201:
try:
reason = r.json()["message"]
            except (ValueError, KeyError):
reason = r.text
raise ApiwatcherClientException(
"Authorization failed. Reason {0} {1}".format(
r.status_code, reason)
)
else:
data = r.json()["data"]
self.token = data["access_token"]
if "refresh_token" in data:
self.auth_data = {
"grant_type": "refresh_token",
"refresh_token": data["refresh_token"],
"client_id": self.auth_data["client_id"]
}
def _do_request(self, method, endpoint, data=None):
"""Perform one request, possibly solving unauthorized return code
"""
# No token - authorize
if self.token is None:
self._do_authorize()
r = requests.request(
method,
"{0}{1}".format(self.base_url, endpoint),
headers={
"Authorization": "Bearer {0}".format(self.token),
"Content-Type": "application/json"
},
json=data,
verify=self.verify_certificate,
timeout=self.timeout
)
if r.status_code == 401:
self._do_authorize()
r = requests.request(
method,
"{0}{1}".format(self.base_url, endpoint),
headers={
"Authorization": "Bearer {0}".format(self.token),
"Content-Type": "application/json"
},
json=data,
verify=self.verify_certificate,
timeout=self.timeout
)
return r
def get(self, endpoint):
""" Calls HTTP GET request and returns response like requests
:param endpoint: Where to call the request
:type endpoint: String
"""
return self._do_request("get", endpoint)
def post(self, endpoint, data=None):
""" Calls HTTP POST request and returns response like requests
:param endpoint: Where to call the request
:type endpoint: String
        :param data: Data to be sent
        :type data: Dictionary or array (must be JSON serializable)
"""
return self._do_request("post", endpoint, data)
def put(self, endpoint, data=None):
""" Calls HTTP PUT request and returns response like requests
:param endpoint: Where to call the request
:type endpoint: String
        :param data: Data to be sent
        :type data: Dictionary or array (must be JSON serializable)
"""
return self._do_request("put", endpoint, data)
def delete(self, endpoint):
""" Calls HTTP DELETE request and returns response like requests
:param endpoint: Where to call the request
:type endpoint: String
"""
return self._do_request("delete", endpoint)
|
Apiwatcher-Pyclient
|
/Apiwatcher-Pyclient-0.1.3.tar.gz/Apiwatcher-Pyclient-0.1.3/apiwatcher_pyclient/client.py
|
client.py
|
import os
import zipfile
import re
import tarfile
from pathlib import Path
folder_list = ['images', 'video', 'documents',
'audio', 'archives', 'different']
def sort(direct):
    if direct:
        u_input = direct
    else:
        u_input = input("Enter Directory to sort: ")
    path = Path(u_input)
items = path.glob('**/*')
caunt = 1
for item in items:
        # Photos
if '.jpg' in str(item) or '.jpeg' in str(item) or '.png' in str(item) or '.svg' in str(item):
source_path = Path(path / 'images')
if not source_path.exists():
source_path.mkdir()
try:
new_location = item.rename(source_path / normalize(item.name))
except FileExistsError:
new_location = item.rename(
source_path / f"{str(caunt)}{normalize(item.name)}")
caunt += 1
        # Video
elif '.avi' in str(item) or '.mp4' in str(item) or '.mov' in str(item) or '.mkv' in str(item):
source_path = Path(path / 'video')
if not source_path.exists():
source_path.mkdir()
try:
new_location = item.rename(source_path / normalize(item.name))
except FileExistsError:
new_location = item.rename(
source_path / f"{str(caunt)}{normalize(item.name)}")
caunt += 1
        # Documents
elif '.doc' in str(item) or '.docx' in str(item) or '.txt' in str(item) or '.pdf' in str(item) or '.xlsx' in str(item) or '.pptx' in str(item):
source_path = Path(path / 'documents')
if not source_path.exists():
source_path.mkdir()
try:
new_location = item.rename(source_path / normalize(item.name))
except FileExistsError:
new_location = item.rename(
source_path / f"{str(caunt)}{normalize(item.name)}")
caunt += 1
        # Music
elif '.ogg' in str(item) or '.mp3' in str(item) or '.wav' in str(item) or '.amr' in str(item):
source_path = Path(path / 'audio')
if not source_path.exists():
source_path.mkdir()
try:
new_location = item.rename(source_path / normalize(item.name))
except FileExistsError:
new_location = item.rename(
source_path / f"{str(caunt)}{normalize(item.name)}")
caunt += 1
        # Archives
elif '.zip' in str(item):
source_path = Path(path / 'archives')
if not source_path.exists():
source_path.mkdir()
fantasy_zip = zipfile.ZipFile(item)
try:
fantasy_zip.extractall(
source_path / normalize(item.name, ex=False))
            except Exception:
fantasy_zip.extractall(
source_path / f"{str(caunt)}{normalize(item.name, ex=False)}")
caunt += 1
fantasy_zip.close()
try:
new_location = item.rename(source_path / normalize(item.name))
except FileExistsError:
new_location = item.rename(
source_path / f"{str(caunt)}{normalize(item.name)}")
caunt += 1
        elif '.gz' in str(item):
            source_path = Path(path / 'archives')
            if not source_path.exists():
                source_path.mkdir()
            tar = tarfile.open(item, "r:gz")
            try:
                tar.extractall(source_path / normalize(item.name, ex=False))
            except Exception:
                tar.extractall(
                    source_path / f"{str(caunt)}{normalize(item.name, ex=False)}")
                caunt += 1
            tar.close()
        elif '.tar' in str(item):
            source_path = Path(path / 'archives')
            if not source_path.exists():
                source_path.mkdir()
            tar = tarfile.open(item, "r:")
            try:
                tar.extractall(source_path / normalize(item.name, ex=False))
            except Exception:
                tar.extractall(
                    source_path / f"{str(caunt)}{normalize(item.name, ex=False)}")
                caunt += 1
            tar.close()
        # Miscellaneous
else:
if item.is_file() and 'archives' not in item.name:
source_path = Path(path / "different")
if not source_path.exists():
source_path.mkdir()
try:
new_location = item.rename(
source_path / normalize(item.name))
except FileExistsError:
new_location = item.rename(
source_path / f"{str(caunt)}{normalize(item.name)}")
caunt += 1
del_empty_dirs(u_input)
def del_empty_dirs(path) -> None:
for d in os.listdir(path):
a = os.path.join(path, d)
if os.path.isdir(a) and 'archives' not in a:
print(a)
del_empty_dirs(a)
if not os.listdir(a):
os.rmdir(a)
def normalize(x: Path, ex=True) -> str:
TRANS = {'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ё': 'e', 'ж': 'j', 'з': 'z', 'и': 'i', 'й': 'j', 'к': 'k', 'л':
'l', 'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h', 'ц': 'ts', 'ч': 'ch', 'ш': 'sh', 'щ': 'sch', 'ъ': '', 'ы': 'y', 'ь': '', 'э': 'e', 'ю': 'yu', 'я': 'ya', 'є': 'je', 'і': 'i', 'ї': 'ji', 'ґ': 'g',
'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ё': 'E', 'Ж': 'J', 'З': 'Z', 'И': 'I', 'Й': 'J', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'H', 'Ц': 'Ts', 'Ч': 'Ch',
'Ш': 'Sh', 'Щ': 'Sch', 'Ъ': '', 'Ы': 'Y', 'Ь': '', 'Э': 'E', 'Ю': 'Yu', 'Я': 'Ya', 'Є': 'Je', 'І': 'I', 'Ї': 'Ji', 'Ґ': 'G'}
chars2drop = "!\"$%&'*+,-№#/:.;<>=?[\]^`{|}~\t\n\x0b\x0c\r"
x = str(x)
separator = None
ext = ''
for i in range(len(x)):
if x[i] == ".":
separator = i
if separator:
separator = len(x) - separator
s, ext = x[:-separator], x[-separator:]
else:
s = x
trans_tab = str.maketrans(dict.fromkeys(list(chars2drop), "_"))
res = " ".join((s.translate(trans_tab).split()))
    res = re.sub(r"\s*>\s*$", "_", res)
    res = res.translate(res.maketrans(TRANS))
    if not ex or separator is None:
return res
else:
res += ext
return res
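

# Illustrative example: normalize(Path("звіт-2021.txt")) -> "zvit_2021.txt"
# ("-" is one of the dropped characters replaced with "_", and Cyrillic
# letters are transliterated); with ex=False the extension is dropped.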
|
Apllepy-personal-assistant
|
/Apllepy_personal_assistant-0.1.6-py3-none-any.whl/app/Sorter/sort.py
|
sort.py
|
from collections import UserDict
from datetime import datetime, timedelta
from itertools import islice
import re
class Field:
def __init__(self, value):
self.value = value
def __str__(self):
return f"{self.value}"
def __repr__(self):
return f"{self.value}"
class Birthday(Field):
    # (year-month-day)
@property
def value(self):
return self.__value
@value.setter
def value(self, val: str):
data = val.split("-")
if not "".join(data).isdigit():
raise ValueError
        if int(data[0]) > datetime.now().year or int(data[1]) > 12 or int(data[2]) > 31:
            raise ValueError
self.__value = val
class Address(Field):
def __init__(self, value):
self.value = value
class Email(Field):
def __repr__(self):
return self.value
@property
def value(self):
return self.__value
@value.setter
def value(self, value: str):
if value is None:
self.__value = None
else:
result = None
get_email = re.findall(
r'\b[a-zA-Z][\w\.]+@[a-zA-Z]+\.[a-zA-Z]{2,}', value)
if get_email:
for e in get_email:
result = e
if result is None:
raise AttributeError(f'Incorrect value provided {value}')
self.__value = result
class Name(Field):
def __init__(self, value):
self.value = value.capitalize()
class Phone(Field):
@property
def value(self):
return self.__value
@value.setter
def value(self, val: str):
        if len(val) not in (10, 13) or not val.lstrip("+").isdigit():
            raise ValueError
if len(val) == 10:
val = "+38" + val
if not val[3] == "0":
raise ValueError
self.__value = val
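    # Illustrative: Phone("0666266830").value -> "+380666266830";
    # a 13-character value such as "+380662951095" is stored as given.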
class Record():
def __init__(self, name: Name, phone: Phone = None, birthday: Birthday = None, email=None, address: Address = None):
self.name = name
self.phones = []
self.birthday = birthday
self.email = email
self.address = address
if phone:
self.phones.append(phone)
def __str__(self):
return f"{self.name} - {', '.join([str(p) for p in self.phones])}"
def __repr__(self):
return f"{self.name} - {', '.join([str(p) for p in self.phones])}"
def add_phone(self, phone: Phone):
self.phones.append(phone)
def delete_phone(self, phone: Phone):
for p in self.phones:
if p.value == phone.value:
self.phones.remove(p)
return f'Phone {p.value} delete successful.'
return f'Phone {phone.value} not found'
def days_to_bd(self):
        # (year-month-day)
if not self.birthday:
print("Birthday not entered")
else:
date1 = self.birthday.value.split("-")
date = datetime(year=datetime.now().year, month=int(
date1[1]), day=int(date1[2]))
data_now = datetime.now()
dat = date - data_now
return dat.days
def add_email(self, email):
self.email = email
def add_address(self, address):
self.address = address
def add_birthday(self, birthday):
self.birthday = birthday
class AddressBook(UserDict):
def add_record(self, rec: Record):
self[rec.name.value] = rec
def __str__(self):
return '\n'.join([str(i) for i in self.values()])
    def iteration(self, step=5):
        index = 0
        while index < len(self):
            yield list(islice(self.items(), index, index + step))
            index += step
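    # Illustrative: for chunk in book.iteration(step=2): print(chunk)
    # yields the stored records two at a time.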
def birthday_ib_day(self, day):
days = datetime.now().date() + timedelta(days=day)
caunt = 0
for i in self:
if self[i].birthday:
date1 = self[i].birthday.value.split("-")
date = datetime(year=datetime.now().year, month=int(
date1[1]), day=int(date1[2])).date()
if days == date:
print(self[i])
caunt += 1
if caunt == 0:
print(f"In {day} days there is no birthday")
if __name__ == '__main__':
name = Name('Bill')
phone = Phone('0666266830')
rec = Record(name, phone)
ab = AddressBook()
ab.add_record(rec)
name1 = Name('alisa')
phone1 = Phone('+380662951095')
rec1 = Record(name1, phone1)
rec1.add_birthday(Birthday("1997-02-01"))
ab.add_record(rec1)
ab.birthday_ib_day(1)
|
Apllepy-personal-assistant
|
/Apllepy_personal_assistant-0.1.6-py3-none-any.whl/app/Address_book/Class.py
|
Class.py
|
from collections import UserDict
# from Class import *
from .Class import *
from datetime import datetime
from prompt_toolkit import print_formatted_text, prompt, HTML
from prompt_toolkit.completion import NestedCompleter
import pickle
try:
with open("AddressBook.bin", "rb") as file:
ab = pickle.load(file)
except (FileNotFoundError, EOFError, pickle.UnpicklingError):
ab = AddressBook()
pages = []
def input_error(func):
def wrapper(*args):
try:
return func(*args)
except IndexError:
print("Sorry, try again")
except ValueError:
print("incorrect input")
except KeyError:
print("incorrect Name")
except AttributeError:
print("incorrect Email")
return wrapper
def greetings(*args):
print("How can I help you?")
def help(*args):
print("I know these commands: hello, add, append, change, phone, show, del_phon, birthday, day, page, search, email, home, happy_birthday, help")
@input_error
def add(*argv):
phone, birthday = None, None
name = Name(argv[0][0])
if len(argv[0]) >= 2:
phone = Phone(argv[0][1])
if len(argv[0]) >= 3:
birthday = Birthday(argv[0][2])
ab.add_record(Record(name, phone, birthday))
print(f"Contact {name} added")
@input_error
def add_phon(*argv):
ab[argv[0][0].title()].add_phone(Phone(argv[0][1]))
print(f"phone {argv[0][1]} added to contact {argv[0][0].title()}")
@input_error
def change(*argv):
if len(argv[0]) == 3:
ab[argv[0][0].title()].add_phone(Phone(argv[0][2]))
ab[argv[0][0].title()].delete_phone(Phone(argv[0][1]))
print(
f"In contact {argv[0][0].title()} number {argv[0][1]} is replaced by {argv[0][2]}")
else:
print("To change the number, enter the name of the contact, the number to change, the new phone number")
@input_error
def del_phone(*argv):
ab[argv[0][0].title()].delete_phone(Phone(argv[0][1]))
print(f"Phone {argv[0][1]} removed from contact {argv[0][0].title()}")
@input_error
def show_all(*argv):
print(ab)
@input_error
def page(*argv):
reg = ab.iteration(2)
for b in reg:
pages.append(b)
print(f"page {int(argv[0][0])} of {len(pages)}")
for i in pages[int(argv[0][0]) - 1]:
print(i[1])
@input_error
def output_phone(name):
print(ab[name[0].title()])
if ab[name[0].title()].birthday:
print(f"Birthday: {ab[name[0].title()].birthday}")
if ab[name[0].title()].email:
print(f"email: {ab[name[0].title()].email}")
if ab[name[0].title()].address:
print(f"address: {ab[name[0].title()].address}")
@input_error
def add_birthday(*args):
ab[args[0][0].title()].birthday = Birthday(args[0][1])
print(f"Birthday {args[0][1]} added to contact {args[0][0].title()}")
@input_error
def search(val):
if val[0].isdigit():
for p in ab.items():
for x in p[1].phones:
if not str(x).find(val[0]) == -1:
print(p[1])
else:
for p in ab.items():
if not p[0].lower().find(val[0].lower()) == -1:
print(p[1])
@input_error
def day_birthday(name):
print(
f"Before birthday {name[0].title()}, {ab[name[0].title()].days_to_bd()} days")
@input_error
def add_email(*args):
ab[args[0][0].title()].email = Email(args[0][1])
print(f"Email {args[0][1]} added to contact {args[0][0].title()}")
@input_error
def add_address(*args):
ab[args[0][0].title()].address = Address(args[0][1])
print(f"Address {args[0][1]} added to contact {args[0][0].title()}")
@input_error
def birthday_ib_day(day):
print(f"In {day[0]} days the birthday of:")
ab.birthday_ib_day(int(day[0]))
completer = NestedCompleter.from_nested_dict({"hello": None, "add": None, "append": None, "change": None, "phone": None, "show": None,
"del": None, "birthday": None, "day": None, "page": None, "search": None, "email": None, "happy_birthday": None, "exit": None, "home": None})
COMMANDS = {
greetings: "hello",
add: "add",
add_phon: 'append',
change: "change",
output_phone: "phone",
show_all: "show",
del_phone: "del_phon",
add_birthday: "birthday",
day_birthday: "day",
page: "page",
search: "search",
add_email: "email",
add_address: "home",
help: "help",
birthday_ib_day: "happy_birthday"
}
def command_parser(u_input: str):
    for command, key_words in COMMANDS.items():
        if u_input.startswith(key_words):
            return command, u_input.replace(key_words, "").strip().split(" ")
    return None, None
def address_Book():
print("Address book open")
while True:
u_input = prompt('>>>', completer=completer)
u_input = u_input.lower()
if u_input in [".", "good bye", "close", "exit", "/", ""]:
print("Good bye!")
break
        command, data = command_parser(u_input)
        if not command:
            print("Enter command")
        else:
            command(data)
with open("AddressBook.bin", "wb") as file:
pickle.dump(ab, file)
if __name__ == "__main__":
address_Book()
|
Apllepy-personal-assistant
|
/Apllepy_personal_assistant-0.1.6-py3-none-any.whl/app/Address_book/AdressBook.py
|
AdressBook.py
|
from collections import UserDict
from datetime import datetime, date
import pickle
import re
# class Tag - a keyword attached to notes, like Phone in the address book; max length = 20
class Tag():
def __init__(self, value):
self.value = value
@property
def value(self):
return self.__value
@value.setter
def value(self, value: str):
if len(value) > 20:
            raise ValueError('Max tag length is 20 symbols')
self.__value = value
def __str__(self):
return f"{self.value}"
def __repr__(self):
return f"{self.value}"
def __eq__(self, other):
if not other:
return False
return self.value == other.value
def __gt__(self, other):
return self.value > other.value
# class Note. Required field: text_content; optional field: tags
class Note():
def __init__(self, text_content: str, *tag):
self.text = text_content
self.number = None
self.tags = []
self.note_date = datetime.today().date()
for i in tag:
self.tags.append(Tag(i))
# add tag to note
def add_tag(self, tag: Tag):
if tag not in self.tags:
return self.tags.append(tag)
return self.tags
# delete tag
def del_tag(self, tag: Tag):
if tag in self.tags:
self.tags.remove(tag)
return True
return False
    # replace the note text with new text
def change_note(self, new_text):
self.text = new_text
return "Ok"
    # long representation; the note text is cut to 50 symbols
def __repr__(self):
if len(self.text) < 50:
text = self.text
else:
text = self.text[0:50] + '...'
if not self.tags:
tags = 'empty'
else:
tags = self.show_all_tags()
return f"Number: {self.number}, Date: {self.note_date}, Tags: {tags}, Text: {text}"
    # short representation: only the note text, wrapped at roughly 50 symbols per line
def __str__(self):
i = 0
pr_str = ''
pos_space = 0
if len(self.text) < 50:
return self.text
else:
while i < (len(self.text) - 50):
pos_space = self.text[i:i + 50].rfind(' ')
pr_str = pr_str + self.text[i:i + pos_space] + '\n'
i += pos_space + 1
if i < len(self.text):
pr_str = pr_str + self.text[i:]
return pr_str
def show_all_tags(self):
return ', '.join(str(p) for p in self.tags)
# class Notebook - like AddressBook, a dictionary where the key is a unique class counter
class Notebook(UserDict):
index = 0
# add new note with class-count number as key
def add_note(self, note: Note):
Notebook.index += 1
note.number = Notebook.index
self.data[Notebook.index] = note
return "note added"
def search_by_tag(self, tag: Tag):
note_list = []
for value in self.data.values():
if tag in value.tags:
note_list.append(value)
return note_list
    # search notes whose text contains the given phrase
def search_by_word_in_note(self, phrase):
text_coincidence = []
for value in self.data.values():
if re.search(phrase, value.text, flags=re.IGNORECASE):
text_coincidence.append(value)
return text_coincidence
    # search notes by date
def search_by_date(self, day: date):
date_list = []
for value in self.data.values():
if day.date() == value.note_date:
date_list.append(value)
return date_list
#sort list of instances
def sort_by_date(self):
object_list = list(self.data.values())
object_list.sort(key=lambda note: note.note_date)
return object_list
@staticmethod
def sort_key_tag(note):
if note.tags:
return note.tags[0]
else:
return Tag('')
#sort list of instances
def sort_by_tag(self):
object_list = list(self.data.values())
object_list.sort(key=Notebook.sort_key_tag)
return object_list
# show list of tags
def show_all_tags(self):
tags_list = []
for value in self.data.values():
for item in value.tags:
if item not in tags_list:
tags_list.append(item)
tags_list.sort()
return tags_list
# show all dates
def show_all_dates(self):
date_list = []
for value in self.data.values():
if value.note_date not in date_list:
date_list.append(value.note_date)
date_list.sort()
return ', '.join(p.strftime('%Y-%m-%d') for p in date_list)
#serialize Notebook
def serializer(self, path_to_book):
if not self.data:
return 'Notebook is empty'
with open(path_to_book, "wb") as fh:
pickle.dump(self.data, fh)
return 'Notebook saved'
#deserialize Notebook
def deserializer(self, path_to_book):
keys_list = []
try:
with open(path_to_book, "rb") as fh:
self.data = pickle.load(fh)
keys_list = list(self.data)
Notebook.index = max(keys_list)
res = f'Saved notebook loaded, number of saved notes is {Notebook.index} '
except FileNotFoundError:
            res = 'No saved notebook file found at ' + path_to_book
return res
|
Apllepy-personal-assistant
|
/Apllepy_personal_assistant-0.1.6-py3-none-any.whl/app/Notebook/notesclass.py
|
notesclass.py
|
from datetime import datetime
from os import path
from .notesclass import Tag, Note, Notebook
import time
from .decorator import input_error
from prompt_toolkit import print_formatted_text, prompt, HTML
from prompt_toolkit.completion import NestedCompleter
notebook = Notebook()
# add note
@input_error
def add_note(*args):
if not args:
return "Add text of note after command"
note = (' '.join(str(p) for p in args))
if len(note) > 1000:
return "Number of symbol > 1000, must be less"
else:
note = Note(note)
return notebook.add_note(note)
# edit a note
@input_error
def edit_note(number):
    note_key = int(number)
    ch_note = notebook[note_key]
    user_input = input(' Input new text >>')
    # change the existing Note in place so its number, date and tags survive
    ch_note.change_note(user_input)
    print(ch_note)
    return "Ok"
# add a tag to a note. You need to know its 'number'; run a search to see it
@input_error
def add_tags(number, *tags):
    note = notebook[int(number)]
    for tag in tags:
        note.add_tag(Tag(tag))
    return 'Tags now: ' + note.show_all_tags()
# delete a note tag. You need to know the note 'number'; run a search to see it
@input_error
def del_tag(number, tag):
ch_note = notebook[int(number)]
res = ch_note.del_tag(Tag(tag))
if res:
return f'Tag {tag} delete successful.'
return f'Tag {tag} not found'
# delete a note by number. You need to know the note 'number'; run a search to see it
@input_error
def del_note(number):
number = int(number)
return notebook.pop(number)
@input_error
def search_by_tag(tag):
notes = notebook.search_by_tag(Tag(tag))
if not notes:
return 'No notes found with this phrase'
return '\n'.join(repr(p) for p in notes)
@input_error
def search_by_text(word):
notes = notebook.search_by_word_in_note(word)
if not notes:
return 'No notes found with this phrase'
return '\n'.join(repr(p) for p in notes)
@input_error
def search_by_date(date):
    print('Date has to be in format %Y-%m-%d')
date_for_search = datetime.strptime(date, '%Y-%m-%d')
notes = notebook.search_by_date(date_for_search)
if not notes:
return 'No notes found with this date'
return '\n'.join(repr(p) for p in notes)
# notes sorting for date
@input_error
def sort_by_date():
pr = ''
notes = notebook.sort_by_date()
for p in notes:
pr = pr + '\n' + p.note_date.strftime('%Y-%m-%d') + "|| " + repr(p)
return pr
# notes sorting for first tag
@input_error
def sort_by_tag():
pr = ''
notes = notebook.sort_by_tag()
for p in notes:
if not p.tags:
str_tag = 'empty'
else:
str_tag = str(p.tags[0])
pr = pr + '\n' + '{:<20}'.format(str_tag) + "|| " + repr(p)
return pr
# show all existing dates
@input_error
def show_all_dates():
return notebook.show_all_dates()
# show all existing tags
@input_error
def show_all_tags():
return repr(notebook.show_all_tags())
@input_error
def show_one_note(number):
return f'Full information: {repr(notebook[int(number)])} \nOnly text: {str(notebook[int(number)])}'
# show all existing tags
@input_error
def show_all_notes():
return f"Note: " + '\nNote: '.join(repr((notebook[key])) for key in notebook.keys())
@input_error
def save_notebook(path = 'notebook.txt'):
save_status = notebook.serializer(path)
return save_status
@input_error
def save_notebook_with_ques(path = 'notebook.txt'):
user_input = input('Do you want to save notes? (y/n)>>>')
if user_input not in ('y', 'n'):
return "Try once more"
elif user_input == 'y':
save_status = notebook.serializer(path)
return save_status
else:
return " "
@input_error
def load_notebook(path = 'notebook.txt'):
return notebook.deserializer(path)
@input_error
def help(*args):
print("I know these commands: \nadd_note <note_text>, \nedit <note_number>, \nadd_tag <note_number> <tag or list of tags>,"\
"\ndel_note <note_number>, \ndel_tag <note_number> <tag>, \nsearch_tag <tag>, \nsearch_text <text>, \nsearch_date <date>,"\
"\nsort_by_date, \nsort_by_tag, \nshow_dates, \nshow_tags, \nshow_notes, \nshow_single, \nsave, \nhelp")
return " "
completer = NestedCompleter.from_nested_dict({"add_note": None, "add_tag": None, "edit": None, "del_tag": None, "del_note": None, "search_tag": None,
"search_text": None, "search_date": None, "sort_by_date": None, "sort_by_tag": None, "show_dates": None, "show_tags": None, "show_notes": None, "exit": None, "show_single": None, "save": None, "help": None})
COMMANDS = {
"add_note": add_note,
"edit": edit_note,
"add_tag": add_tags,
"del_tag": del_tag,
"del_note": del_note,
"search_tag": search_by_tag,
"search_text": search_by_text,
"search_date": search_by_date,
"sort_by_date": sort_by_date,
"sort_by_tag": sort_by_tag,
"show_dates": show_all_dates,
"show_tags": show_all_tags,
"show_notes": show_all_notes,
"show_single": show_one_note,
"save": save_notebook,
"help": help
}
def command_parser(user_input):
key_word, *args = user_input.split()
command = None
key_word = key_word.lower()
if key_word not in COMMANDS:
return None, None
command = COMMANDS.get(key_word)
return command, *args
def notepad():
print("Notebook is opened")
load_status = load_notebook('notebook.txt')
print(load_status)
while True:
u_input = input(">>>")
u_input = u_input.lower()
if u_input in [".", "good bye", "close", "exit", "/", ]:
print(save_notebook_with_ques())
print("Good bye!")
time.sleep(1.5)
break
command, *data = command_parser(u_input)
if not command:
result = "Enter command or choose 'help'"
else:
result = command(*data)
print(result)
|
Apllepy-personal-assistant
|
/Apllepy_personal_assistant-0.1.6-py3-none-any.whl/app/Notebook/notes.py
|
notes.py
|
# 
[](https://travis-ci.org/TsimpDim/Aplos) [](https://coveralls.io/github/TsimpDim/Aplos?branch=master)
Aplos is a simple and elegant linear problem (LP) parser. It lets you extract all the information you need from any linear problem written with the correct syntax. You can read more about Linear Programming [here.](https://en.wikipedia.org/wiki/Linear_programming)
#### Expected LP format
>min/max c<sup>T</sup>x
>
>s.t /st /s.t. /subject to Ax ⊗ b
>
>End
Where ⊗ can be any of the following: =, <=, >=
Variable(x) constraints/domains are not taken into consideration *(not yet)*.
---
Examples:
1. Max 3x1 +2x2
s.t. x1+2x2<=9
2x1+5x2<=4
End
2. min 3x1 - 5x2 + x4
st x2 + x3 = 2
2x1 + 3x2 + 5x4 >= 5
x1 - 5x2 + 2x3 - 4x4 <= 10
END
## Usage
``` python
import Aplos
# Initialization
# From a file
parser = Aplos.AplosParser(filename='lp.txt')
# From a string
text_lp = '''Max 3x1 +2x2
s.t. x1+2x2<=9
2x1+5x2<=4
End'''
parser = Aplos.AplosParser(text=text_lp)
# From a string with a custom delimeter
text = "Max 3x1 +2x3 + x5,s.t. x1+2x2<=9,2x1+5x2<=4,End"
parser = Aplos.AplosParser(text=text, delimeter=',')
# Getting the variables
variables_of_line = parser.get_vars(line_idx=0)
# variables_of_line = {"existing":['x1','x3'], "extended":['x1','x2','x3','x4','x5']}
variables_all = parser.get_vars()
# variables_all = ['x1','x2','x3','x4','x5']
# Detect errors
errors = parser.detect_errors() # set print_msg=True to print the full list of errors
if not errors:
# Get dimensions
dimensions = parser.get_dimensions()
m = dimensions['m']
n = dimensions['n']
# Get any matrix (A,b,c,Eqin or MinMax)
# Eqin and MinMax have values corresponding to symbols
# Eqin -- '<=': -1 | '>=' : 1 | '=' : 0
# MinMax -- 'max':1 | 'min':-1
matrix_A = parser.get_matrix('a')
matrix_b = parser.get_matrix('B')
# And so on
# Otherwise, get all matrices at once.
# Keys are : A,b,c,Eqin & MinMax
matrices = parser.get_matrices()
matrix_A = matrices['A']
# And so on
# Save matrices to file
parser.write_matrices_to_file('output.txt')
# Get dual matrices
# Variable constraints -- 'free' : 0 | '>= 0' : 1 | '<= 0' : -1}
dual_A = parser.get_dual_matrix('a')
# Variable constraints are calculated assuming that x(i) >= 0
# for every i. This is subject to change.
dual_var_constr = parser.get_dual_matrix('var_constr')
# And so on
# You can also get all the dual matrices together
# Similarly keys are : A,b,c,Eqin,MinMax & VarConstr
dual_matrices = parser.get_dual_matrices()
dual_A = dual_matrices['A']
# And so on
# Save dual matrices to file
parser.write_matrices_to_file('output_dual.txt', dual=True)
# After saving matrices (non-dual), you can also read them back
saved_matrices = parser.read_matrices_from_file('output.txt')
# If dual
saved_d_matrices = parser.read_matrices_from_file('output_dual.txt')
```
*As the project continues, the 'usage' section will get updated and eventually (hopefully) be moved in a documentation file/page altogether.*
|
Aplos
|
/Aplos-1.1.tar.gz/Aplos-1.1/README.md
|
README.md
|
# Crater Detection Tool
### Installation Guide
By using the command:
```
pip install ApolloCraterDetectionTool
```
### User instructions
### 📖Package
After installation, download the models (.pt files) using download_models.py.
First, in a terminal,
```
cd package_path + '/apollo'
```
Next, run
```
python download_models.py
```
This downloads a models folder in the same path, which includes the Mars_best.pt, Moon_Left_Model.pt, and Moon_Right_Model.pt files.
Then add the module by running in python:
```
from apollo import *
```
Congratulations, you can now use the models to detect craters in two ways.
## 1.UI.py
Use the command
```
python UI.py
```
to run the software; the UI interface will show up. **Be careful to install the yolov5 package first.**

To run the model, firstly, select the Model type, e.g. Yolov5, Mars model.
Next, click the `test folder` button and select the test images file and input a folder name in the `result directory`.
Now, you can click `Detect` to run the model. The crater labels are saved in `user_directory/folder_name/detection`, and the image with bounding boxes on the craters, together with the model's statistics, appears nearby.
More results can be viewed using `Browse`, which shows one image at a time in the window so you can inspect each image after detection.
## 2.CraterPredictor Class
To begin with, you can create an object named detection:
```
detection = CraterPredictor(mars_model, moon_model1, moon_model2, results_path, test_images_path, test_labels_path, img_size)
```
where:
- `mars_model` is the string path to the Mars model weights
- `moon_model1`, `moon_model2` are string paths to the Moon model weights
- `results_path` is the string path to the directory in which to store the results for the user
- `test_images_path` is the string path to the directory containing the test images
- `test_labels_path` is the string path to the directory containing the test labels, optionally given by the user
- `img_size` is the size of the input images
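
For example (a minimal sketch; the paths are placeholders, and we assume `CraterPredictor` is exported by the `apollo` package):

```
from apollo import CraterPredictor

detection = CraterPredictor(
    mars_model='models/Mars_best.pt',
    moon_model1='models/Moon_Left_Model.pt',
    moon_model2='models/Moon_Right_Model.pt',
    results_path='results/',
    test_images_path='test_images/',
    img_size=416,
)
```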
## Model
This class covers model detection and the functions you can use.
For detection, two models can be selected: a Moon model and a Mars model.
```
detection.predict_lunar_large_image(images)
```
This function predicts lunar craters in a large lunar image by slicing it into smaller sub-images and making predictions on each sub-image; the predictions are then aggregated into a single result.
```
detection.predict_mars_craters(images)
```
This function detects craters in the given images with the Mars model and returns a csv file per image in `user_directory/detections`.
```
detection.draw_boxes(label_path=None)
```
Use this to draw bounding boxes on images, both ground truth (if provided) and detections; results are saved in 'results_path/images/detections' and 'results_path/images/detections_and_gt'.
```
detection.get_statistics(label_path=None):
```
This function saves the true positive (tp), false positive (fp), and false negative (fn) values to a csv file. label_path (str) is the path to the directory containing the label csv files; if not specified, the function returns without performing any operations.
### Analysis and Visualization
```
analysis.convert2physical(labels, subimg_pix, indices)
```
The function returns the location (lat, lon) based on the information given by the user. `labels` holds the predicted location and size of craters in the picture, `subimg_pix` is the number of pixels in each sub-picture, and `indices` are the indices of the different sub-pictures.
```
size_frequency_compare_plot(folderpath, detection, real)
```
This function plots the cumulative crater size-frequency distribution of detected craters, if `crater size` information is provided. `folderpath` is the user-specified input folder location, `detection` holds the physical, real-world crater sizes from detection, and `real` holds the real-world crater sizes from ground truth.
### Model Perform Metric
After training our CDM, we randomly selected two sub-regions [Fig1](https://github.com/edsml-zw1622/33/raw/main/Img/B-19.jpg) and [Fig2](https://github.com/edsml-zw1622/33/raw/main/Img/B-0.jpg)(B-65-19, B-64-0). We plotted the crater size frequency distribution of these two regions. The blue broken line in the figure below represents the prediction result of our detector algorithm in this area, and the black broken line represents the actual distribution of crater diameters in this area. In addition, we also calculated the difference between the model prediction and the ground truth, and the residual is indicated by the red broken line.


The following two pictures show the `x, y, w, h, latitude, longitude, diameter` of the B-64-0 area. The first picture is the prediction result of the detector algorithm, and the second picture is the real information of the craters in this area. You can see the data in [Detection Data](https://github.com/edsml-zw1622/33/raw/main/Img/Detection.jpg) and [Label Data](https://github.com/edsml-zw1622/33/raw/main/Img/labeldata.jpg).
### UI Example:

### Documentation
The code includes [Sphinx](https://www.sphinx-doc.org) documentation. On systems with Sphinx installed, this can be built by running
```
python -m sphinx docs html
```
then viewing the generated `index.html` file in the `html` directory in your browser.
For systems with [LaTeX](https://www.latex-project.org/get/) installed, a manual pdf can be generated by running
```bash
python -m sphinx -b latex docs latex
```
Then follow the instructions to process the `CraterDetectionTool.tex` file in the `latex` directory.
### Testing
The tool includes several tests, which you can use to check its operation on your system. With [pytest](https://doc.pytest.org/en/latest) installed, these can be run with
```bash
python -m pytest --doctest-modules apollo
```
Additionally, we provide a test for analysis.py. To use it, first cd into the *tests* folder, then run **test_analysis.py**.
### Reading list
- (Description of lunar impact crater database Robbins, 2019.)
[https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2018JE005592]
- (Yolov5 Model description)
[https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data]
- (Equirectangular projection description)[https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/283357/ILRSpecification2013_14Appendix_C_Dec2012_v1.pdf]
### License
[MIT](https://opensource.org/licenses/MIT)
|
ApolloCraterDetectionTool
|
/ApolloCraterDetectionTool-1.0.tar.gz/ApolloCraterDetectionTool-1.0/README.md
|
README.md
|
import csv
import os
import shutil
from sklearn.model_selection import train_test_split
class DataProcessor():
def __init__(self, image_data_path, label_data_path):
"""
DataProcessor class with image and label data paths.
Parameters
----------
image_data_path (str): path to the folder containing the image data.
label_data_path (str): path to the folder containing the label data.
"""
self.image_data_path = image_data_path
self.label_data_path = label_data_path
def convert_csv_to_yolo_txt(self, csv_file):
"""
Convert a CSV file to YOLO format text file.
Parameters
----------
csv_file (str): path to the CSV file to be converted.
"""
# Open the CSV file for reading
with open(csv_file, 'r') as file:
reader = csv.reader(file)
txt_file = csv_file.replace('.csv', '.txt')
with open(txt_file, 'w') as out_file:
for row in reader:
# add class 0 to the first column
out_file.write('0 ' + ' '.join(row[:4]) + '\n')
os.remove(csv_file)
    def convert_all_labels_to_txt(self):
        """
        Convert all CSV label files in the label data folder
        to YOLO format text files.
        """
for filename in os.listdir(self.label_data_path):
if filename.endswith('.csv'):
self.convert_csv_to_yolo_txt(self.label_data_path + filename)
def create_data_directories(self, destination_root):
"""
Create directories for train, validation and test datasets.
Parameters
----------
destination_root (str): root directory where train,
validation and test datasets will be stored.
"""
os.mkdir(destination_root)
image_folder = destination_root + "images/"
labels_folder = destination_root + "labels/"
train_images = destination_root + "images/train/"
train_labels = destination_root + "labels/train/"
val_images = destination_root + "images/val/"
val_labels = destination_root + "labels/val/"
test_images = destination_root + "images/test/"
test_labels = destination_root + "labels/test/"
os.mkdir(image_folder)
os.mkdir(labels_folder)
os.mkdir(train_images)
os.mkdir(train_labels)
os.mkdir(val_images)
os.mkdir(val_labels)
os.mkdir(test_images)
os.mkdir(test_labels)
def split_and_move_data(self, destination_root):
"""
Split and move image and label data to train,
validation and test directories.
Parameters
----------
destination_root (str): root directory where train,
validation and test datasets will be stored.
"""
images = [
os.path.join(
self.image_data_path, x
) for x in os.listdir(self.image_data_path)
]
labels = [
os.path.join(
self.label_data_path, x
) for x in os.listdir(self.label_data_path) if x[-3:] == "txt"
]
images.sort()
labels.sort()
train_images, val_images, train_labels, val_labels = train_test_split(
images,
labels,
test_size=0.2,
random_state=1
)
val_images, test_images, val_labels, test_labels = train_test_split(
val_images,
val_labels,
test_size=0.5,
random_state=1
)
self.move_files_to_folder(
train_images, destination_root + 'images/train')
self.move_files_to_folder(
val_images, destination_root + 'images/val')
self.move_files_to_folder(
test_images, destination_root + 'images/test')
self.move_files_to_folder(
train_labels, destination_root + 'labels/train')
self.move_files_to_folder(
val_labels, destination_root + 'labels/val')
self.move_files_to_folder(
test_labels, destination_root + 'labels/test')
def move_files_to_folder(self, files, dest):
"""
Move files between two folders
Parameters
----------
files (str): from folder
dest (str): to folder
"""
print('moving files into ' + dest)
for f in files:
try:
shutil.copy2(f, dest)
except Exception:
print(f + "could not be moved")
|
ApolloCraterDetectionTool
|
/ApolloCraterDetectionTool-1.0.tar.gz/ApolloCraterDetectionTool-1.0/apollo/data_processor.py
|
data_processor.py
|
from yolov5 import detect
import os
import csv
import shutil
import cv2
import numpy as np
import pandas as pd
import analysis
class CraterPredictor:
"""
Class to generate user outputs from input images
by running object detection on the images using YOLOv5 models.
"""
def __init__(
self,
mars_model,
moon_model1,
moon_model2,
results_path,
test_images_path,
test_labels_path=None,
img_size=416,
):
"""
Parameters
----------
        mars_model : str (.pt file)
            Path to the mars model weights
        moon_model1, moon_model2 : str (.pt files)
            Paths to the moon model weights
results_path : str
Path to the directory to store the results for the user
test_images_path : str
Path to the directory containing the test images
test_labels_path: str
Path to the directory containing the test labels,
optionally given by the user
img_size: int
Size of the input images, specified by the user
"""
self.mars_model = mars_model
self.moon_model1 = moon_model1
self.moon_model2 = moon_model2
self.results_path = results_path
self.detections_path = results_path + "detections/"
self.agg_detections_path = results_path + "agg_detections/"
self.images_path = results_path + "images/"
self.stats_path = results_path + "statistics/"
self.test_images_path = test_images_path
self.test_labels_path = test_labels_path
self.img_size = img_size
def crop_img(self, img, x, y, pixel_size=416):
"""
Crop a sub-image from a larger image.
Parameters
----------
img (ndarray): the larger image to crop a sub-image from
x (int): the x-coordinate of the top-left corner of the sub-image
y (int): the y-coordinate of the top-left corner of the sub-image
pixel_size (int): the size (h and w) of the sub-image (default: 416)
Returns:
ndarray: the cropped sub-image
"""
h, w, _ = img.shape
sub_img = img[
y * pixel_size: (y * pixel_size) + pixel_size,
x * pixel_size: (x * pixel_size) + pixel_size,
]
if sub_img.shape[0] != pixel_size or sub_img.shape[1] != pixel_size:
# Fill the empty part with black color
sub_img = cv2.copyMakeBorder(
sub_img,
0,
pixel_size - sub_img.shape[0],
0,
pixel_size - sub_img.shape[1],
cv2.BORDER_CONSTANT,
value=[0, 0, 0],
)
return sub_img
def slice_lunar_large_image(self, img_file, folder_name, image_size):
"""
Slice a large lunar image into smaller sub-images and save them
Parameters
----------
img_file (str): path to the large lunar image file
folder_name (str): name of the folder to save the sub-images to
image_size (int): the size (height and width) of each sub-image
Returns
----------
tuple: the shape of the large lunar image
"""
img = cv2.imread(img_file)
# figure out how many iterations
x_range = img.shape[1] // image_size
y_range = img.shape[0] // image_size
for x in range(x_range + 1):
for y in range(y_range + 1):
sub_img = self.crop_img(img, x, y, image_size)
cv2.imwrite(folder_name + "/{}-{}.jpg".format(x, y), sub_img)
return img.shape
def predict_lunar_large_image(self, images):
"""
Predict lunar craters in a large lunar image by slicing it into
smaller sub-images and making predictions on each sub-image.
The predictions are then aggregated into a single result.
Parameters
----------
images (str): path to the directory containing the large lunar image(s)
"""
image_size = self.img_size
if os.path.exists(self.agg_detections_path):
shutil.rmtree(
self.agg_detections_path, ignore_errors=False, onerror=None)
os.mkdir(self.agg_detections_path)
for i, img_file in enumerate(os.listdir(images)):
print(img_file)
result_file_name = img_file.replace(".jpg", ".csv")
img_file = os.path.join(images, img_file)
folder_path = "sliced_lunar_images_" + str(i)
if os.path.exists(folder_path):
shutil.rmtree(folder_path, ignore_errors=False, onerror=None)
os.mkdir(folder_path)
large_img_size = self.slice_lunar_large_image(
img_file, folder_path, image_size
)
self.predict_moon_craters(folder_path)
self.aggregate_labels(result_file_name, large_img_size, image_size)
def aggregate_labels(self, result_file, large_img_size, image_size):
"""
Combine the predictions of lunar craters in multiple sub-images into
a single result.
Parameters
----------
result_file (str): name of the file to save the aggregated result to
large_img_size (tuple): the shape of the large lunar image
image_size (int): the size (height and width) of each sub-image
"""
rows = []
# loop through the list of CSV files
for filename in os.listdir(self.detections_path):
# read each CSV file into a dataframe
if filename.endswith(".csv"):
with open(
os.path.join(
self.detections_path, filename), "r") as f:
reader = csv.reader(f)
# add each row from the csv file to the rows list
fs = filename.split('-')
x = int(fs[0])
y = int(fs[1][:-4])
for row in reader:
row = [float(i) for i in row[:4]]
row = self.convert_to_global(
row, x, y, large_img_size[1],
large_img_size[0], image_size
)
rows.append(row)
res_file = os.path.join(self.agg_detections_path, result_file)
with open(res_file, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(rows)
def convert_to_global(
self, detection, i_sub_img, j_sub_img, w_img, h_img, sub_img_px=416
):
"""
Converts the detected bounding boxes of a sub image to the global
coordinates in the larger image
Parameters
----------
detection (list): detected crater bounds in form of [x, y, w, h]
i_sub_img (int): the sub image x index in the larger image
j_sub_img (int): the sub image y index in the larger image
w_img (int): the width of the large image
h_img (int): the height of the large image
        sub_img_px (int): the pixel dimensions of the sub image
Return
----------
list: [x, y, w, h] as values in the large image
"""
x_local, y_local, w_local, h_local = detection
x_global = (x_local + i_sub_img) * sub_img_px / w_img
y_global = (y_local + j_sub_img) * sub_img_px / h_img
w_global = w_local * sub_img_px / w_img
h_global = h_local * sub_img_px / h_img
return [x_global, y_global, w_global, h_global]
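    # Illustrative: a detection at x_local = 0.5 in sub-image column
    # i_sub_img = 1, with sub_img_px = 416 and w_img = 832, maps to
    # x_global = (0.5 + 1) * 416 / 832 = 0.75 of the large image width.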
def predict_moon_craters(self, images):
"""
Predict moon craters and put results in user_directory/detections.
Result is a csv file for each image containing the detected locations
Parameters
----------
images : str
Path to the test images
Example
-------
predictor.predict_moon_craters('test_images/')
"""
self.yolo_detect(
images, [self.moon_model1, self.moon_model2], name="detections"
)
def predict_mars_craters(self, images):
"""
Predict mars craters and put results in user_directory/detections.
Result is a csv file for each image containing the detected locations
Parameters
----------
images : str
Path to the test images
Example
-------
predictor.predict_mars_craters('test_images/')
"""
self.yolo_detect(images, self.mars_model, name="detections")
def yolo_detect(self, images, weights, name):
"""
Run YOLOv5 detection
Parameters
----------
images : str
Path to the test images
weights : str
Path to the model weights
name : str
Name of the output file. Detections
            are converted to csv and moved here.
Example
-------
predictor.yolo_detect('test_images/', 'mars.weights', 'detections')
"""
detect.run(
source=images,
weights=weights,
imgsz=self.img_size,
save_txt=True,
name=name,
)
self.convert_and_move_detections()
def create_user_results(self):
"""
Creates the directory specified in `results_path` if it does not exist.
"""
if not os.path.exists(self.results_path):
os.mkdir(self.results_path)
def convert_and_move_detections(self):
"""
Converts the YOLOv5 detections saved as txt files
in the `runs/detect/detections/labels`
directory to csv format and moves them to `results_path/detections'.
"""
self.create_user_results()
if os.path.exists(self.detections_path):
shutil.rmtree(
self.detections_path, ignore_errors=False, onerror=None)
os.mkdir(self.detections_path)
self.convert_all_detections_to_csv("runs/detect/detections/")
def convert_yolo_txt_to_csv(self, txt_file, csv_file):
"""
Converts a YOLOv5 txt file to a csv file.
Parameters
----------
txt_file : str
Path to the txt file
csv_file : str
Path to the csv file
"""
with open(txt_file, "r") as file:
labels = file.readlines()
csv_rows = []
for line in [lb.strip() for lb in labels]:
dims = line.split(" ")
row = [
float(dims[1]), float(dims[2]),
float(dims[3]), float(dims[4])]
csv_rows.append(row)
with open(csv_file, "w", newline="") as file:
writer = csv.writer(file)
writer.writerows(csv_rows)
os.remove(txt_file)
def convert_all_detections_to_csv(self, txt_path):
"""
Converts all txt files in the specified directory to csv format.
Parameters
----------
txt_path : str
Path to the directory containing the txt files
"""
txt_labels_path = txt_path + "labels"
for filename in os.listdir(txt_labels_path):
if filename.endswith(".txt"):
# Get the file paths
txt_file = os.path.join(txt_labels_path, filename)
csv_file = os.path.join(
self.detections_path, filename.replace(".txt", ".csv")
)
# Call the conversion function
self.convert_yolo_txt_to_csv(txt_file, csv_file)
# remove the labels folder so the same name can
# be used again for next run
shutil.rmtree(txt_path, ignore_errors=False, onerror=None)
def bounding_box(self, img, x, y, w, h, img_w, img_h, color):
"""
Draw bounding boxes.
Parameters
----------
img : numpy.array
The image read from cv2.imread()
x : float
x position of the crater centre
y : float
y position of the crater centre
w : float
Width of crater
h : float
Height of crater
img_w: int
Width of the image
img_h: int
Height of the image
Returns
-------
numpy.array
The image with bounding boxes drawn
Examples
------
>>> x = 0.5
>>> y = 0.5
>>> w = 0.1
>>> h = 0.1
>>> img_w = 416
>>> img_h = 416
>>> color = (0, 255, 0)
>>> bounding_box(img, x, y, w, h, img_w, img_h, color)
"""
x = x * img_w
# w = w * img_w
y = y * img_h
# h = h * img_h
cv2.rectangle(
img,
(int(x - w * img_w / 2), int(y - h * img_h / 2)),
(int(x + w * img_w / 2), int(y + h * img_h / 2)),
color,
1,
)
# cv2.rectangle(img, (0, 0), (208, 208), (0, 255, 0), 1)
return img
def draw_boxes(self, label_path=None):
"""
Draw bounding boxes on images,
both ground truth (if provided) and detections.
Results are saved in
'results_path/images/detections' and
'results_path/images/detections_and_gt'
Parameters
----------
label_path : str, optional
The path to the folder containing ground truth bounding boxes.
If not provided, only detections will be drawn.
Example
-------
>>> draw_boxes()
>>> draw_boxes(label_path='/path/to/ground_truth')
"""
# set the pred labels path to where they were saved
img_path = self.test_images_path
if os.path.exists(self.images_path):
shutil.rmtree(self.images_path, ignore_errors=False, onerror=None)
os.mkdir(self.images_path)
# make one folder for only detections
os.mkdir(self.images_path + "detections/")
# make one folder for both detections and ground truth
os.mkdir(self.images_path + "detections_and_gt/")
pred_label_path = self.detections_path
# TODO: handle different image file types
for file in os.listdir(img_path):
if file.endswith(".png") or \
file.endswith(".jpg") or \
file.endswith(".tif"):
filetype = "png"
if file.endswith(".jpg"):
filetype = "jpg"
if file.endswith(".tif"):
filetype = "tif"
file_path_img = f"{img_path}/{file}"
img_single = cv2.imread(file_path_img)
img_both = cv2.imread(file_path_img)
img_w = self.img_size
img_h = self.img_size
if label_path:
label = pd.read_csv(
f"{label_path}/{file.replace(filetype, 'csv')}",
names=["x", "y", "w", "h"],
)
# draw the ground truth bounding boxes
for i in range(label.shape[0]):
x = label["x"][i]
y = label["y"][i]
w = label["w"][i]
h = label["h"][i]
img_both = self.bounding_box(
img_both, x, y, w, h, img_w, img_h, (0, 0, 255)
)
try:
pred_label = pd.read_csv(
f"{pred_label_path}/{file.replace(filetype, 'csv')}",
names=["x", "y", "w", "h"],
)
except Exception:
                    print(
                        "No detection found: "
                        "there were no craters detected in this image"
                    )
# cv2.imshow(img)
# if no detection is found, only show the ground truth box
if label_path:
cv2.imwrite(
self.images_path + "detections_and_gt/" +
file, img_both
)
continue
else:
cv2.imwrite(
self.images_path + "detections/" +
file, img_single)
continue
# print(label)
# draw the detected bounding boxes
for i in range(pred_label.shape[0]):
det_x = pred_label["x"][i]
det_y = pred_label["y"][i]
det_w = pred_label["w"][i]
det_h = pred_label["h"][i]
# print(det_x, det_y, det_w, det_h)
img_both = self.bounding_box(
img_both, det_x, det_y, det_w,
det_h, img_w, img_h, (255, 0, 0)
)
img_single = self.bounding_box(
img_single,
det_x,
det_y,
det_w,
det_h,
img_w,
img_h,
(255, 0, 0),
)
# print(img)
# cv2.imshow(img)
if label_path:
cv2.imwrite(
self.images_path + "detections_and_gt/" +
file, img_both
)
cv2.imwrite(
self.images_path + "detections/" + file, img_single)
def get_statistics(self, label_path=None):
"""
Calculate and write the true positive (tp),
false positive (fp), and false negative (fn) values to a csv file.
Parameters
----------
label_path : str, optional
The path to the directory containing the label csv files.
If not specified, the function returns without performing
any operations.
"""
if os.path.exists(self.stats_path):
shutil.rmtree(self.stats_path, ignore_errors=False, onerror=None)
os.mkdir(self.stats_path)
if not label_path:
return
ground_truth = self.csv_to_numpy(label_path)
detections = self.csv_to_numpy(self.detections_path)
iou_threshold = 0.5
tp = 0
fp = 0
fn = 0
for filename, labels in ground_truth.items():
if filename not in detections:
# no detections
fn += len(labels)
continue
else:
curr_tp, curr_fp, curr_fn = self.calculate_statistics(
detections[filename], labels, iou_threshold
)
tp += curr_tp
fp += curr_fp
fn += curr_fn
# use the same filename
filepath = os.path.join(self.stats_path, "statistics.csv")
with open(filepath, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["tp", "fp", "fn"])
writer.writerow([tp, fp, fn])
def csv_to_numpy(self, directory):
"""
Convert all csv files in a directory into NumPy arrays.
Parameters
----------
directory : str
The path to the directory containing the csv files.
Returns
-------
arrays : dict
A dictionary mapping each csv filename to the NumPy array
of that file's contents.
"""
arrays = {}
for filename in os.listdir(directory):
if filename.endswith(".csv"):
filepath = os.path.join(directory, filename)
arrays[filename] = np.genfromtxt(filepath, delimiter=",")
return arrays
def calculate_iou(self, detected, ground_truth):
"""
Calculate the Intersection over Union (IoU) between two bounding boxes.
Parameters
----------
detected : list
A list of 4 values representing the bounding box
coordinates (x, y, width, height).
ground_truth : list
A list of 4 values representing the bounding box
coordinates (x, y, width, height).
Returns
-------
float
The calculated IoU value.
"""
x1 = detected[0]
y1 = detected[1]
w1 = detected[2]
h1 = detected[3]
x2 = ground_truth[0]
y2 = ground_truth[1]
w2 = ground_truth[2]
h2 = ground_truth[3]
# convert centre-format boxes to corner coordinates
ax1 = x1 - w1 / 2
ay1 = y1 + h1 / 2
bx1 = x1 + w1 / 2
by1 = y1 - h1 / 2
ax2 = x2 - w2 / 2
ay2 = y2 + h2 / 2
bx2 = x2 + w2 / 2
by2 = y2 - h2 / 2
# corners of the intersection rectangle
cox1 = max(ax1, ax2)
cox2 = min(bx1, bx2)
coy1 = min(ay1, ay2)
coy2 = max(by1, by2)
# signed extents: negative means the boxes do not overlap on that axis,
# so clamp to zero rather than taking the absolute value
dx = cox2 - cox1
dy = coy1 - coy2
area_overlap = max(0, dx) * max(0, dy)
area_a = w1 * h1
area_b = w2 * h2
area_all = area_a + area_b - area_overlap
iou = area_overlap / area_all
return iou
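# Worked example (illustration only, not from the original source): two
# identical centre-format boxes (0.5, 0.5, 0.2, 0.2) overlap completely,
# so area_overlap == area_a == area_b and the IoU is 1.0; for disjoint
# boxes dx or dy is negative, the clamp zeroes the overlap and IoU is 0.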
def calculate_statistics(self, detections, ground_truths, threshold):
"""
Calculate true positive, false positive,
and false negative values using IoU.
Parameters
----------
detections : numpy.ndarray or list
Bounding box coordinates (x, y, width, height) for each
detected object.
ground_truths : numpy.ndarray or list
Ground truth bounding box coordinates (x, y, width, height)
for each object.
threshold : float
The IoU threshold value to consider a detection as a true
positive.
Returns
-------
tuple
A tuple of 3 values representing true positive (int),
false positive (int), and false negative (int) values.
"""
tp = 0
fp = 0
fn = 0
if detections.ndim == 1:
detections = detections[np.newaxis, :]
if ground_truths.ndim == 1:
ground_truths = ground_truths[np.newaxis, :]
detections = detections.tolist()
ground_truths = ground_truths.tolist()
detected = [False] * len(ground_truths)
for d in detections:
max_iou = 0
best_match = -1
for i, gt in enumerate(ground_truths):
iou = self.calculate_iou(d, gt)
if iou > max_iou:
max_iou = iou
best_match = i
if max_iou >= threshold:
if not detected[best_match]:
tp += 1
detected[best_match] = True
else:
fp += 1
else:
fp += 1
fn = int(np.sum(np.logical_not(detected)))
return tp, fp, fn
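# Note on the matching scheme above (descriptive comment, not from the
# original source): each detection is matched greedily to its best-IoU
# ground truth; a second detection hitting an already-matched ground
# truth is counted as a false positive.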
def idx_labels(self, det_path):
"""
Add crater size and physical locations of the craters
to the label csv files
Parameter
----------
det_path: str
Detection path containing information of detected craters
"""
for file in os.listdir(det_path):
info = file[:-4].split("-")
img_area, img_a, img_b = info[0], int(info[1]), int(info[2])
df = pd.read_csv(det_path + "/" + file, names=["x", "y", "w", "h"])
lon, lat, crater_size = analysis.convert2physical(
[df["x"], df["y"], df["w"], df["h"]],
[416.0, 416.0],
[img_area, img_a, img_b],
)
df["lon"] = lon
df["lat"] = lat
df["crater_size"] = crater_size
df.to_csv(det_path + "/" + "calc" + file, index=False)
|
ApolloCraterDetectionTool
|
/ApolloCraterDetectionTool-1.0.tar.gz/ApolloCraterDetectionTool-1.0/apollo/crater_predictor.py
|
crater_predictor.py
|
import numpy as np
import matplotlib.pyplot as plt
from apollo.config import Params
params = Params()
__all__ = ["convert2physical", "size_frequency_compare_plot"]
def convert2physical(labels, subimg_pix, indices):
"""
Return a set of physical parameters of craters, including size and location.
Parameters
----------
labels: array or array-like
The predicted location and size of craters in the picture,
as (x, y, w, h) fractions of the sub-picture.
subimg_pix: array or array-like
The number of pixels in each sub-picture.
indices: array or array-like
The indices of different sub-pictures, given as
(part, column index, row index), where part selects the
image area in params.MOON_LOC.
Returns
-------
crater_lon: array or array-like
The physical, real-world longitudes of the crater centres
crater_lat: array or array-like
The physical, real-world latitudes of the crater centres
crater_size: array or array-like
The physical, real-world crater sizes
Examples
--------
>>> import apollo
>>> labels = [0.782866827, 0.463593897, 0.099159444, 0.099098558]
>>> apollo.convert2physical(labels, [416, 416], [part, 0, 1])  # part keys params.MOON_LOC
"""
x, y, w, h = labels
subimg_pix_w, subimg_pix_h = subimg_pix
part, a, b = indices
phy_lon = params.MOON_LOC[part][0]
phy_lat = params.MOON_LOC[part][1]
# longitude and latitude of the picture origin
img_origin_x = phy_lon - 0.5 * params.MOON_TRAIN_W
img_origin_y = phy_lat + 0.5 * params.MOON_TRAIN_H
# longitude and latitude of the sub-picture origin
subimg_origin_x = img_origin_x + a * subimg_pix_w \
* params.MOON_RESO * 180 / (np.pi * params.MOON_RADIUS)
subimg_origin_y = img_origin_y - b * subimg_pix_h \
* params.MOON_RESO * 180 / (np.pi * params.MOON_RADIUS)
# longitude and latitude of the crater centre
crater_lon = subimg_origin_x + x * subimg_pix_w \
* params.MOON_RESO * 180 / (np.pi * params.MOON_RADIUS)
crater_lat = subimg_origin_y - y * subimg_pix_h \
* params.MOON_RESO * 180 / (np.pi * params.MOON_RADIUS)
crater_h = h * subimg_pix_h * params.MOON_RESO / 1e3
crater_size = crater_h
return crater_lon, crater_lat, crater_size
def size_frequency_compare_plot(folderpath, detection, real):
"""
Plot a separate plot of the cumulative crater size-frequency
distribution of detected craters, if information to calculate
crater size is provided.
Parameters
----------
folderpath: string
The user-specified input folder location
detection: array or array-like
The crater sizes produced by the detector, in km
real: array or array-like
The ground-truth, real-world crater sizes, in km
"""
countdetected = np.histogram(detection, np.arange(1, 100, 2))
xdetected = list(countdetected[1])
ydetected = list(countdetected[0])
countreal = np.histogram(real, np.arange(1, 100, 2))
xreal = list(countreal[1])
yreal = list(countreal[0])
yreal.append(0)
ydetected.append(0)
residual = [a_item - b_item for a_item, b_item in zip(ydetected, yreal)]
plt.subplot(2, 1, 1)
plt.plot(xdetected, ydetected, "-co", label="crater_detector_algorithm")
plt.plot(xreal, yreal, "-kx", label="real_crater_size")
plt.xlabel("Diameter (km)", fontsize=14)  # Add an x-label to the axes.
plt.ylabel("Number of craters", fontsize=14)
plt.title(
"Crater Size-frequency Distribution", fontsize=14
)  # Add a title to the axes.
plt.legend()  # Add a legend.
plt.subplot(2, 1, 2)
plt.plot(xdetected, residual, "-ro", label="residual")
plt.ylabel("Residuals", fontsize=14)
filepath = folderpath + "/size_frequency.png"
plt.savefig(filepath)
return 1
|
ApolloCraterDetectionTool
|
/ApolloCraterDetectionTool-1.0.tar.gz/ApolloCraterDetectionTool-1.0/apollo/analysis.py
|
analysis.py
|
from tkinter import (
Frame,
StringVar,
OptionMenu,
Button,
LEFT,
RIGHT,
BOTTOM,
Label,
Entry,
PhotoImage,
Tk,
filedialog,
)
from crater_predictor import CraterPredictor
import pandas as pd
class GUI(Frame):
"""
This class creates a graphical user interface for a project,
allowing the user to interact with input options.
Parameters:
master (Tk, optional): The parent widget for the GUI. Default is None.
Attributes:
option_frame (Frame): The frame for user-defined hyperparameters.
model_options (list): The list of available models.
model_var (StringVar): The variable storing the selected model.
model_menu (OptionMenu):
The dropdown menu for selecting models.
model_var_button (Button): The button for displaying
the selected model value.
model_var_label (Label): The label displaying the selected model value.
data_options (list): The list of available data options.
data_var (StringVar): The variable storing the selected data option.
data_menu (OptionMenu): The dropdown menu for selecting data options.
test_folder_frame (Frame): The frame for the test folder path.
test_folder_path (Button): The button for choosing the test folder.
res_dir_frame (Frame): The frame for the result directory.
e1_label (Label): The label for the result directory.
result_dir (StringVar): The variable storing the result directory path.
e1 (Entry): The entry widget for the result directory path.
detect_frame (Frame): The frame for the detect button.
detect_btn (Button): The button for starting detection.
file_frame (Frame): The frame for the file browse button.
label (Label): The label for displaying the selected file.
file (Button): The button for browsing and selecting a file.
analysis_frame (Frame): The frame for the analysis button.
analysis_btn (Button): The button for computing crater size and location.
exit_frame (Frame): The frame for the exit button.
exit (Button): The button for exiting the program.
Methods:
show_value: Displays the selected model and data values.
choose_folder: Chooses the test folder path.
detect: Starts detection.
choose: Allows the user to browse and select a file.
analysis: Computes crater sizes and physical locations.
"""
def __init__(self, master=None):
Frame.__init__(self, master)
# w, h = 650, 650
w = root.winfo_screenwidth()
h = root.winfo_screenheight()
master.minsize(width=w, height=h)
master.maxsize(width=w, height=h)
self.pack()
# user defined hyperparameters frame
self.option_frame = Frame(master)
self.option_frame.pack()
self.model_options = ["Yolov5", "Yolov8"]
self.model_var = StringVar(self.option_frame)
# self.model_var_label = Label(self.option_frame)
# default value
self.model_var.set(self.model_options[0])
self.model_menu = OptionMenu(
self.option_frame, self.model_var, *self.model_options
)
self.model_menu.pack(side=LEFT)
self.model_var_button = Button(
self.option_frame, text="Show selected values",
command=self.show_value)
self.model_var_button.pack(side=RIGHT)
self.model_var_label = Label(self.option_frame)
self.model_var_label.pack(side=RIGHT)
self.data_options = ["Mars", "Moon"]
self.data_var = StringVar(self.option_frame)
# default value
self.data_var.set(self.data_options[0])
self.data_menu = OptionMenu(
self.option_frame, self.data_var, *self.data_options
)
self.data_menu.pack(side=LEFT)
# group test folder path to a frame
self.test_folder_frame = Frame(master)
self.test_folder_frame.pack()
self.test_folder_path = Button(
self.test_folder_frame, text="Test folder",
command=self.choose_folder)
self.test_folder_path.pack(side=LEFT)
# pack to result directory frame
self.res_dir_frame = Frame(master)
self.res_dir_frame.pack()
# user defined directory for result
# pack() returns None, so keep the widget reference before packing
self.e1_label = Label(self.res_dir_frame, text="Result directory")
self.e1_label.pack(side=LEFT)
self.result_dir = StringVar(master)
self.e1 = Entry(self.res_dir_frame, textvariable=self.result_dir)
self.e1.pack(side=RIGHT)
# detect frame
self.detect_frame = Frame(master)
self.detect_frame.pack()
self.detect_btn = Button(
self.detect_frame, text="Detect", command=self.detect)
self.detect_btn.pack(side=LEFT)
# compute physical location and crater size
self.analysis_frame = Frame(master)
self.analysis_frame.pack()
self.analysis_btn = Button(
self.analysis_frame,
text="Compute crater size and location",
command=self.analysis,
)
self.analysis_btn.pack()
# file frame
self.file_frame = Frame(master)
self.file_frame.pack()
self.label = Label(self.file_frame)
self.file = Button(self.file_frame, text="Browse", command=self.choose)
self.file.pack()
self.label.pack()
# exit btn
self.exit_frame = Frame(master)
self.exit_frame.pack()
self.exit = Button(self.exit_frame, text="Exit", command=root.destroy)
self.exit.pack()
def show_value(self):
"""
This function is used to show what kinds of values users
choose in the interface.
"""
self.model_var_label.config(
text="Selected model: "
+ self.model_var.get()
+ "\nSelected data: "
+ self.data_var.get()
)
def choose(self):
"""
This function is used to choose the specified image file.
"""
ifile = filedialog.askopenfile(
parent=self, mode="rb", title="Choose a file")
self.image = PhotoImage(file=ifile.name)
self.label.configure(image=self.image)
self.label.image = self.image
def choose_folder(self):
"""
This function is used to choose the specified folder.
"""
path = filedialog.askdirectory()
self.test_folder_var = path
Label(
self.test_folder_frame, text=self.test_folder_var).pack(side=RIGHT)
def detect(self):
"""
This function is used to generate results from user's inputs.
"""
# get model to detect
self.test_img_path = self.test_folder_var + "/images/"
self.test_labels_path = self.test_folder_var + "/labels/"
self.user_specified_directory = self.result_dir.get()
# yolov5
mars_model_path = "models/Mars_best.pt"
moon_model_left_path = "models/Moon_Left_Model.pt"
moon_model_right_path = "models/Moon_Right_Model.pt"
if self.data_var.get() == "Mars":
self.craterPredictor = CraterPredictor(
mars_model=mars_model_path,
moon_model1=moon_model_left_path,
moon_model2=moon_model_right_path,
results_path=self.user_specified_directory,
test_images_path=self.test_img_path,
)
# predict with mars
self.craterPredictor.predict_mars_craters(self.test_img_path)
else:
self.craterPredictor = CraterPredictor(
mars_model=mars_model_path,
moon_model1=moon_model_left_path,
moon_model2=moon_model_right_path,
results_path=self.user_specified_directory,
test_images_path=self.test_img_path,
)
# predict with moons
# crop_func()
self.craterPredictor.predict_moon_craters(self.test_img_path)
# craterPredictor.draw_boxes()
self.craterPredictor.draw_boxes(self.test_labels_path)
# craterPredictor.get_statistics()
self.craterPredictor.get_statistics(self.test_labels_path)
self.detect_btn_label = Label(
self.detect_frame, text="Detection complete"
)
self.detect_btn_label.pack(side=BOTTOM)
stats_path = self.user_specified_directory + \
"/statistics/statistics.csv"
df = pd.read_csv(stats_path)
self.stats = Label(
self.detect_frame,
text=f"Statistics\ntp: {df['tp'][0]}"
+ f"\nfp: {df['fp'][0]}"
+ f"\nfn: {df['fn'][0]}",
)
self.stats.pack()
def analysis(self):
"""
This function is used to compute crater sizes and physical
locations for the detected craters.
"""
self.craterPredictor.idx_labels(
self.user_specified_directory + "/detections/")
Label(text="Analysis computed").pack()
root = Tk()
root.title("Crater prediction")
app = GUI(master=root)
app.mainloop()
|
ApolloCraterDetectionTool
|
/ApolloCraterDetectionTool-1.0.tar.gz/ApolloCraterDetectionTool-1.0/apollo/UI.py
|
UI.py
|
# apolloInterface
A Python implementation of CRUD operations on the item fields under an Apollo namespace.
##### 1. CRUD of items under a namespace
The network events, request methods, headers, URLs and payloads were captured in the browser; after the endpoints were verified with Postman, create, read, update and delete all work as expected.
##### 2. Version rollback under a namespace
A Python implementation of rolling back to a target Apollo release. Because every Apollo release is published against a specific set of dimensions, a rollback requires the application name (application), namespace (namespaces), cluster name (clusters) and environment name (env) to be supplied together, plus, most importantly, the release number to roll back to (release_version).
##### 3. Interfaces implemented so far
| Interface | |
| -------------------- | ------ |
| Add/remove key-value fields | ✔ |
| Update key-value fields | ✔ |
| Delete key-value fields | ✔ |
| Query all fields in a namespace | ✔ |
| Query published releases | ✔ |
| Version rollback | ✔ |
| Config-centre data rollback | ✔ |
| Diff between releases | ✔ |
| Release publishing | ✔ |
| ...more to be added as needed | ...... |
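A minimal usage sketch (adapted from the bundled demo script; the address, credentials, keys and release number below are placeholders):
```python
from ApolloItemsCRUD.apolloService import ApolloUpdate

apollo = ApolloUpdate("10.1.1.1:7547", "apollo", "password")  # placeholder login

# batch CRUD: each item names the app, namespace, cluster, env, action and key/value
items_data = [
    {"action": "create", "apps": "automation", "namespaces": "common",
     "key": "some.key", "value": "123", "clusters": "default", "env": "PRO"},
]
ok, versions = apollo.main_CRUD2(items_data)

# roll one namespace back to an earlier release
apollo.main_rollback("20220902141720_release_by_automation",
                     "automation", "common", env="PRO", cluster="default")
```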
|
ApolloInterface
|
/ApolloInterface-0.0.2.tar.gz/ApolloInterface-0.0.2/README.md
|
README.md
|
import json
import datetime
import sys
import logging
import os
from .apolloInterface import ApolloUpdateItems
def getLog():
logger = logging.getLogger()  # create a logger
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()  # create a handler that writes to the console
ch.setLevel(logging.INFO)
# define the handler's output format
formatter = logging.Formatter(fmt="%(asctime)s-[%(levelname)s] : %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def backup_config(config_msg, version_cache_dir='/tmp/backUpApolloVersion', app="", namespace=''):
try:
if not os.path.exists(version_cache_dir):
os.makedirs(version_cache_dir)
if not isinstance(config_msg, str):
config_msg = json.dumps(config_msg, ensure_ascii=False)
now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
filename = "{}_{}_{}.json".format(app, namespace, now)
cache_file = os.path.join(version_cache_dir, filename)
with open(cache_file, 'w') as fw:
fw.write(config_msg)
logger.info("[Backup]: release data backed up, written to file {}".format(cache_file))
except Exception as e:
logger.error("[Backup]: release backup failed with exception: {}".format(e))
ExitCode = 0
OLD_VERSIONS = {}
logger = getLog()
class ApolloUpdate(ApolloUpdateItems):
def __init__(self, addr, name, password):
"""
:param addr:apollo地址信息
:param name: 用户名
:param password: 用户密码
:return
"""
super().__init__(addr, name, password)
def run_action(self, action, key="", value="", **kwargs):
"""
Execute an operation against the Apollo configuration.
:param action: the CRUD operation to perform
:param key: key of the app.namespaces field to operate on
:param value: value of the app.namespaces field; optional when action is delete
:return:
"""
first_release_dict = self.get_releases_msg(0, 2)[1]  # query the latest release baseline info
first_release_name = first_release_dict.get("name", "")
first_release_id = first_release_dict.get("id", "")
logger.info("Release baseline before the change, name: {}, ID: {}".format(first_release_name, first_release_id))
if action == 'create':  # create a configuration field
run_flag, run_result = self.create_msg(key, value)
elif action == 'update':  # update a configuration field's value
run_flag, run_result = self.update_msg(key, value)
elif action == 'search':  # query the values under app.namespaces.items
run_flag, run_result = self.search_msg()
elif action == 'delete':  # delete a single field
run_flag, run_result = self.delete_msg(key)
else:
run_flag, run_result = False, "invalid action parameter"
logger.info("{}executed {}, run Result: {}, Message: {}".format("=" * 10, action, run_flag, run_result))
if run_flag:
# publish a release after the data change
# each operation is published once, because batched input cannot be guaranteed
# to belong to the same app.namespaces, so one CRUD means one release
release_flag, release_title = self.release_msg()
logger.info("{}published release, run Result: {}, baseline version: {}".format("=" * 10, release_flag, release_title))
else:
logger.warning("=" * 10 + "the current operation does not allow publishing")
last_release_dict = self.get_releases_msg(0, 2)[1]  # query the latest release baseline info
last_release_name = last_release_dict.get("name", "")
last_release_id = last_release_dict.get("id", "")
logger.info("Release baseline after the change, name: {}, ID: {}".format(last_release_name, last_release_id))
change_msg = self.compare_release_msg(first_release_id, last_release_id)  # query and log the changed data
changes_list = change_msg.get("changes", [])
changes_str_list = list(map(str, changes_list))
logger.info("{}this operation changed {} field(s), changed data: {}".format("=" * 10, len(changes_str_list), ''.join(changes_str_list)))
verify_result = False
if changes_list:  # verify the changed data
key_cg = changes_list[0].get("entity", {}).get("secondEntity", {}).get("key", '')  # changed field name
value_cg = changes_list[0].get("entity", {}).get("secondEntity", {}).get("value", '')  # changed field value
if action in ["create", "update"] and key == key_cg and value == value_cg:
verify_result = True
elif action == 'delete' and key_cg == key and value_cg == '':
verify_result = True
else:
verify_result = False
elif action == "create" and run_flag and not first_release_id:
verify_result = True
logger.warning("no previous release found; newly added data: {}".format(last_release_dict.get('configurations', {})))
logger.info("data verification result: {} \n".format(verify_result))
return verify_result, last_release_name
def main_CRUD2(self, items_data, version_cache_dir="/tmp/backUpApolloVersion"):
"""
Batch create/read/update/delete of configuration data.
:step1 parse the input items_data
:step2 group data with the same namespace into one dict:
dic_data = {app.namespace: {items: [],        # all data to modify
firstversion: {},  # latest release in this namespace before the change
lastVersion: {},   # latest release in this namespace after the change
flag: ""           # change status; if any change failed, skip publishing
}...}
:step3 fill in the initial release data per dict entry, handling namespaces with no release yet
:step4 walk the dict, apply the changes in items, and record the result flag
:step5 walk dic_data and publish per namespace, skipping app.namespace entries whose flag
is False; then fetch the latest release per app.namespace, verify the data, and
record the verification status
:step6 judge the overall result: if any release was not published or any data check
failed, the run as a whole fails and an alert is raised
:param items_data:
:return:
"""
result_json = {}
result_fflag = True
global OLD_VERSIONS, ExitCode
if not items_data:
logger.error("=============================bad change data, check the input list===================================")
ExitCode = -1
sys.exit(-1)
all_data = {}
for items in items_data:  # parse the data, group it by namespace, and query each namespace's latest release
apps = items.get("apps")
namespace = items.get("namespaces")
env = items.get("env")
cluster = items.get("clusters")
tempKey = "{}.{}".format(apps, namespace)
if all_data.get(tempKey, {}):
all_data[tempKey]["items"].append(items)
else:
all_data[tempKey] = {
"items": [items],  # all data to modify
"apps": apps,
"namespaces": namespace,
"clusters": cluster,
"env": env,
"first_release_id": '',  # latest release in this namespace before the change
"flag": "",  # change status; if any change failed, skip publishing
}
for dt_json in all_data.values():
apps = dt_json.get("apps")
namespace = dt_json.get("namespaces")
env = dt_json.get("env")
cluster = dt_json.get("clusters")
items = dt_json.get("items")
tempKey = "{}.{}".format(apps, namespace)
logger.info("{fmt}{msg}{fmt}".format(fmt="=" * 25,
msg="applying changes to app.namespace [{}.{}]".format(apps, namespace)))
self.set_apollo_cfg(app=apps, namespaces=namespace, clusters=cluster, env=env)
first_release_dict = self.get_releases_msg(0, 2)[1]  # query the latest release baseline info
first_release_name = first_release_dict.get("name", "")
first_release_id = first_release_dict.get("id", "")
logger.info("[Release query]: namespace [{}.{}] before the change, latest release: {}".format(apps, namespace, first_release_name))
OLD_VERSIONS["{app}.{namespaces}".format(app=apps, namespaces=namespace)] = first_release_name
backup_config(first_release_dict, version_cache_dir=version_cache_dir,
app=apps, namespace=namespace)  # back up the latest configuration
flag_crud = True
kv_input = {}
for item in list(items):
key = item.get("key")
value = item.get("value")
action = item.get("action")
kv_input[key] = value
if action == 'create':  # create a configuration field
run_flag, run_result = self.create_msg(key, value)
elif action == 'update':  # update a configuration field's value
run_flag, run_result = self.update_msg(key, value)
elif action == 'delete':  # delete a single field
run_flag, run_result = self.delete_msg(key)
else:
run_flag, run_result = False, "invalid action parameter"
if not run_flag:
flag_crud = False  # if any single change fails, mark the whole namespace as failed
logger.error("[Config change]: executed {}, run Result: {}, Message: {}".format(action, run_flag, run_result))
else:
logger.info("[Config change]: executed {}, run Result: {}, Message: {}".format(action, run_flag, run_result))
dt_json["flag"] = flag_crud  # TODO: per the original note, the block from here to the matching note below can be commented out
dt_json["kv_input"] = kv_input
dt_json["first_release_id"] = first_release_id
all_data[tempKey] = dt_json
logger.info("{fmt}".format(fmt="-" * 80))
for dt_json in all_data.values():
apps = dt_json.get("apps")
namespace = dt_json.get("namespaces")
env = dt_json.get("env")
cluster = dt_json.get("clusters")
items = dt_json.get("items")
flag_crud = dt_json["flag"]
kv_input = dt_json["kv_input"]
first_release_id = dt_json["first_release_id"]
tempKey = "{}.{}".format(apps, namespace)  # TODO: end of the noted block; commenting it out only changes the log layout, not the behaviour
logger.info("{fmt}{msg}{fmt}".format(fmt="=" * 20,
msg="publishing and verifying app.namespace [{}.{}]".format(apps, namespace)))
self.set_apollo_cfg(app=apps, namespaces=namespace, clusters=cluster, env=env)
if flag_crud:
# publish a release after the data change; one publish per namespace, because
# batched input cannot be guaranteed to belong to the same app.namespaces
release_flag, release_title = self.release_msg()
logger.info("[Release publish]: executed publish, run Result: {}, baseline version: {}".format(release_flag, release_title))
else:
logger.error("[Release publish]: data changes in this namespace failed, publishing is skipped")
result_fflag = False
result_json[tempKey] = "no new release published"
continue
last_release_dict = self.get_releases_msg(0, 2)[1]  # query the latest release baseline info
last_release_name = last_release_dict.get("name", "")
last_release_id = last_release_dict.get("id", "")
last_release_cfg = last_release_dict.get('configurations', {})
last_release_cfg = last_release_cfg if isinstance(last_release_cfg, dict) else eval(last_release_cfg)
logger.info("[Release query]: namespace [{}.{}] after the change, latest release: {}".format(apps, namespace, last_release_name))
change_msg = self.compare_release_msg(first_release_id, last_release_id)  # query and log the changed data
change_data = {}
changes_list = change_msg.get("changes", [])
if first_release_id and changes_list:
for change_dt in changes_list:
firstEntity = change_dt.get("entity", {}).get('firstEntity', {})
secondEntity = change_dt.get("entity", {}).get('secondEntity', {})
k = firstEntity.get("key") if firstEntity.get("key", '') else secondEntity.get("key")
v = firstEntity.get("value") if firstEntity.get("value", '') else secondEntity.get("value")
change_data[k] = v
elif first_release_id and not changes_list:
pass
else:
change_data = kv_input
different1 = set(kv_input.keys()).difference(set(change_data.keys()))  # keys present in the input but absent from the diff result
different2 = list(
set(kv_input.keys()).symmetric_difference(set(change_data.keys())))  # symmetric difference (keys in either set but not both)
if different1 and different1 < set(last_release_cfg.keys()):
# covers updates that set a field to its existing value (e.g. a1 is 1 and is
# "updated" to 1); the input set and the post-change diff then differ legitimately
logger.info("[Data verification]: input data matches the changed data")
result_json[tempKey] = last_release_name
elif different2 and not (different1 and different1 < set(last_release_cfg.keys())):
# compare the input parameters with the changed data
logger.error("[Data verification]: input data and changed data in this namespace differ")
logger.info("[Data verification]: input data {}".format(kv_input))
logger.info("[Data verification]: changed data {}".format(change_data))
result_fflag = False
result_json[tempKey] = last_release_name
# (a count-based check comparing len(items) with len(change_data) was
# sketched here previously and left disabled)
else:
logger.info("[Data verification]: input data matches the changed data")
result_json[tempKey] = last_release_name
return result_fflag, result_json
# TODO: record execution status
def main_CRUD(self, items_data):
"""
Create/read/update/delete of configuration data, one publish per item.
:param items_data:
:return:
"""
global ExitCode
result_verify = True
release_version_list = []
if not items_data:
logger.error("=============================bad change data, check the input list===================================")
ExitCode = -1
sys.exit(-1)
for items in items_data:
action = items.get("action")
item_key = items.get("key")
item_value = items.get("value")
apps = items.get("apps")
namespace = items.get("namespaces")
env = items.get("env")
cluster = items.get("clusters")
self.set_apollo_cfg(app=apps, namespaces=namespace, clusters=cluster, env=env)
run_result, release_title = self.run_action(action, key=item_key, value=item_value)
release_version_list.append(release_title)
if not run_result:
result_verify = False
if not result_verify:
logger.error("some operations failed or produced no change; please check the logs")
ExitCode = -1
sys.exit(-1)
else:
logger.info("all data changes completed, configuration updated successfully")
return release_version_list[0]
def update_data_2_roll_release(self, release_title, last_release_data):
"""
Modify the release data via CRUD and then publish under the target version
name, achieving a rollback.
:param release_title: target release version to roll back to
:param last_release_data: configuration data of the current release
:return:
"""
global ExitCode
result_flag = True
release_flag, release_dicts = self.get_releases_msg(get_all=True)  # fetch all published releases
goal_dict = {}
for release_dict in release_dicts:
release_name = release_dict.get("name", "")
if release_name == release_title:
goal_dict = release_dict
try:
self.rollback_release(release_dict)
except Exception as e:
logger.error(e)
if not goal_dict:
logger.error("failed to fetch the rollback target release id, rollback cancelled")
ExitCode = -1
sys.exit(-1)
else:
logger.info("[Data]: baseline info of the rollback target, name: {}, Data: {}\n\t".format(release_title, goal_dict))
# step 1: diff release_dict against last_release_data, apply CRUD, and publish using the rollback version number
logger.info("{fs}version rollback done, start updating the configuration{fs}".format(fs="=" * 20))
curren_cfg = last_release_data.get('configurations', {})
goal_cfg = goal_dict.get('configurations', {})
# the configurations field sometimes comes back as a string even after json
# decoding, so convert it again if necessary
curren_cfg = curren_cfg if isinstance(curren_cfg, dict) else eval(curren_cfg)
goal_cfg = goal_cfg if isinstance(goal_cfg, dict) else eval(goal_cfg)
curren_cfg_keys = list(curren_cfg.keys())  # fields of the current release
goal_cfg_keys = list(goal_cfg.keys())  # fields of the target release
others = list(set(goal_cfg_keys).difference(set(curren_cfg_keys)))  # fields in the target release but not the current one
logger.info("[Data]: fields of the current release: {}".format(curren_cfg_keys))
logger.info("[Data]: fields of the rollback target release: {}".format(goal_cfg_keys))
for key in curren_cfg_keys + others:
if key in goal_cfg_keys and key in curren_cfg_keys:  # field exists in both releases
value = goal_cfg.get(key, "")
run_flag, run_result = self.update_msg(key, value)  # update the field
logger.info("updated field {} to {}".format(key, value))
elif key in goal_cfg_keys and key not in curren_cfg_keys:  # field only in the target release
value = goal_cfg.get(key, "")
run_flag, run_result = self.create_msg(key, value)  # create the field
logger.info("created field {} with value {}".format(key, value))
else:  # field only in the current release
run_flag, run_result = self.delete_msg(key)
logger.info("deleted field {}".format(key))
if not run_flag:
result_flag = False
release_flag, release_title = self.release_msg(release_title)
logger.info("[Release publish]: executed publish, run Result: {}, baseline version: {}".format(release_flag, release_title))
if not release_flag:
result_flag = False
return result_flag, release_title
def main_rollback(self, release_version, apps, namespace, env="PRO", cluster="default"):
"""
Data rollback.
:param release_version: release version number
:param apps: app name
:param namespace: namespace
:param env: environment name
:param cluster: cluster name
:return:
"""
global OLD_VERSIONS, ExitCode
self.set_apollo_cfg(app=apps, namespaces=namespace, clusters=cluster, env=env)  # set the operating dimensions
if (not release_version) or (not apps) or (not namespace):
logger.error("=============================rollback error, check the input data===================================")
logger.error("apps: {}, namespaces: {}, release_version: {}".format(apps, namespace, release_version))
ExitCode = -1
sys.exit(-1)
current_release_dict = self.get_releases_msg(0, 2)[1]  # query the latest release baseline info
first_release_name = current_release_dict.get("name", "")
first_release_id = current_release_dict.get("id", "")
logger.info("[Release query]: namespace [{}.{}] before the change, latest release: {}".format(apps, namespace, first_release_name))
OLD_VERSIONS["{app}.{namespaces}".format(app=apps, namespaces=namespace)] = first_release_name
if first_release_name == release_version:
logger.error("the rollback target equals the release currently in use, rollback cancelled")
ExitCode = -1
sys.exit(-1)
logger.info("{fmt}{msg}{fmt}".format(fmt="=" * 30, msg="rolling back namespace [{}.{}]".format(apps, namespace)))
run_flag, run_result = self.update_data_2_roll_release(release_version, current_release_dict)  # perform the rollback
logger.info("[Result]: rollback finished, run Result: {}, Message: {}".format(run_flag, run_result))
last_release_dict = self.get_releases_msg(0, 2)[1]  # query the latest release baseline info
last_release_name = last_release_dict.get("name", "")
last_release_id = last_release_dict.get("id", "")
logger.info("[Release query]: namespace [{}.{}] after the change, latest release: {}".format(apps, namespace, last_release_name))
change_msg = self.compare_release_msg(first_release_id, last_release_id)  # query and log the changed data
changes_list = change_msg.get("changes", [])
changes_str_list = list(map(str, changes_list))
logger.info(
"[Changed data]: {}this operation changed {} field(s), changed data: {} \n".format("=" * 10, len(changes_str_list), ''.join(changes_str_list)))
if last_release_name != release_version:
logger.error("[Result]: rollback ran abnormally, please check the logs")
ExitCode = -1
sys.exit(-1)
else:
logger.info("[Result]: all data rolled back, execution succeeded")
return last_release_name
|
ApolloInterface
|
/ApolloInterface-0.0.2.tar.gz/ApolloInterface-0.0.2/ApolloItemsCRUD/apolloService.py
|
apolloService.py
|
from ApolloItemsCRUD.apolloService import ApolloUpdate
def test_CRUD():
items_data = [
{"action": "create", "apps": "automation", "namespaces": "common", "key": "test.testt.nm1", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "automation", "namespaces": "common", "key": "test.testt.nm2", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "automation", "namespaces": "common", "key": "test.testt.nm3", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "cmdb", "namespaces": "Test", "key": "nm1", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "cmdb", "namespaces": "Test", "key": "nm2", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "cmdb", "namespaces": "Test", "key": "nm3", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "platform", "namespaces": "T1", "key": "nm1", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "platform", "namespaces": "T1", "key": "nm2", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "platform", "namespaces": "T1", "key": "nm3", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "platform", "namespaces": "T2", "key": "nm1", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "platform", "namespaces": "T2", "key": "a2", "value": "123", "clusters": "default","env": "PRO"},
{"action": "create", "apps": "platform", "namespaces": "T2", "key": "w3", "value": "123", "clusters": "default","env": "PRO"},
]
# items_data = json.loads(items_data, encoding='utf-8')
apollo = ApolloUpdate(addr, name, password)
flag, version = apollo.main_CRUD2(items_data)
print(version)
def test_rollback():
apps = 'platform'
namespace = 'T1'
release_version = "20220902141720_release_by_automation"
env = "PRO"
cluster = "default"
apollo = ApolloUpdate(addr, name, password)
version = apollo.main_rollback(release_version, apps, namespace, env, cluster)
addr = '10.1.1.1:7547'
name = 'apollo'
password = 'Apollo@086)*^'
version_cache_dir = r'\tmp\backUpApolloVersion'
if __name__ == '__main__':
# items_data = json.loads(items_data, encoding='utf-8')
# apollo = ApolloUpdate(addr, name, password)
# flag,version = apollo.main_CRUD2(items_data,version_cache_dir)
# if not flag:
# ExitCode = -1
# apollo = ApolloUpdate(addr, name, password)
# version = apollo.main_rollback(release_version, apps, namespace, env, cluster)
test_CRUD()
# test_rollback()
|
ApolloInterface
|
/ApolloInterface-0.0.2.tar.gz/ApolloInterface-0.0.2/ApolloItemsCRUD/demo.py
|
demo.py
|
import requests
import json
import datetime
import sys
class ApolloUpdateItems():
def __init__(self, addr, name, password):
"""
:param addr:apollo地址信息
:param name: 用户名
:param password: 用户密码
:return
"""
self.addr = addr
self.name = name
self.password = password
self.header = {
'content-type': "application/json",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
self.session = None
self.login()
def set_apollo_cfg(self, app='', namespaces='', clusters='default', env="PRO"):
"""
Apollo支持4个维度管理Key-Value格式的配置,环境设置,变更操作维度
:param app: app应用名称
:param namespaces:命名空间
:param clusters:集群名称
:param env:环境信息
:return:
"""
self.app = app
self.namespaces = namespaces
self.clusters = clusters
self.env = env
def login(self):
try:
url_login = 'http://{}/signin?'.format(self.addr)
self.session = requests.Session()
self.session.get(url_login, auth=(self.name, self.password), verify=False)  # log in
print("Apollo login succeeded")
except Exception as e:
print("Login failed, please check that the login credentials are correct:", e)
sys.exit()
def create_msg(self, key, value):
"""
app.namespaces下创建key-value字段值
:param key: 字段名称
:param value: 字段值
:return: 执行结果
"""
url_create = "http://{addr}/apps/{apps}/envs/{env}/clusters/{clusters}/namespaces/{namespaces}/item".format(
addr=self.addr,
env=self.env,
apps=self.app,
clusters=self.clusters,
namespaces=self.namespaces
)
body = {
"tableViewOperType": "create",
"key": key,
"value": value,
"addItemBtnDisabled": True
}
response = self.session.post(url_create, data=json.dumps(body), headers=self.header)
code, text = response.status_code, response.text
result_flag = True if code == 200 else False
if result_flag:
result_msg = '{}.{}.{}字段创建成功,执行结果code:{}'.format(self.app, self.namespaces, key, code)
else:
print("数据创建请求执行错误,text:{}".format(text))
result_msg = '{}.{}.{}字段创建失败,执行结果code:{}'.format(self.app, self.namespaces, key, code)
return result_flag, result_msg
def update_msg(self, key, value):
"""
app.namespaces下查找key字段信息,更新字段字典的value值
:param key: 查找和更新的item key
:param value: 更新的value值
:return: 执行结果信息
"""
result_flag, result_update = False, ''
url_update = "http://{addr}/apps/{apps}/envs/{env}/clusters/{clusters}/namespaces/{namespaces}/item".format(
addr=self.addr,
env=self.env,
apps=self.app,
clusters=self.clusters,
namespaces=self.namespaces)
flag_search, json_data = self.search_msg() # 查询当前app.namespaces下的字段信息
if flag_search and json_data:
for json_d in json_data:
jd_key = json_d.get("key", "")
if jd_key != key:
continue
json_d["tableViewOperType"] = "update"
json_d['value'] = value # 匹配到字段,更新数据
response = self.session.put(url_update, data=json.dumps(json_d), headers=self.header)
code, text = response.status_code, response.text
result_flag = True if code == 200 else False
if not result_flag:
print("数据更新请求执行错误,text:{}".format(text))
result_update = '匹配到字段{}.{}.{}信息,更新结束,执行结果code:{}'.format(self.app,
self.namespaces,
key, code)
break
else:
result_update = '查询结果无法匹配到字段{}.{}.{}信息,请人工排查数据准确性,更新结束'.format(self.app,
self.namespaces, key)
else:
result_update = '数据更新失败,未查询到namespaces下字段信息或{}.{}信息查询失败或,请人工排查确认'.format(self.app,
self.namespaces)
return result_flag, result_update
def delete_msg(self, key, value=''):
"""
app.namespaces下删除字段信息
:param key: 需要删除的key
:param value: 需要删除的value,此处未使用
:return: 执行结果信息
"""
result_flag, result_delete = False, ''
url_del = "http://{addr}/apps/{apps}/envs/{env}/clusters/{clusters}/namespaces/{namespaces}/items/{i_d}"
flag_search, json_data = self.search_msg() # 查询当前app.namespaces下的字段信息
if flag_search and json_data:
for json_d in json_data:
jd_key = json_d.get("key", "")
key_id = json_d.get("id")
if jd_key != key:
continue
url_del = url_del.format(addr=self.addr,
apps=self.app,
env=self.env,
clusters=self.clusters,
namespaces=self.namespaces,
i_d=key_id)
response = self.session.delete(url_del, headers=self.header)
code, text = response.status_code, response.text
result_flag = True if code == 200 else False
if not result_flag:
print("数据删除请求执行错误,text:{}".format(text))
result_delete = '匹配到字段{}.{}.{}信息,删除结束,执行结果code:{}'.format(self.app,
self.namespaces,
key, code)
break
else:
result_delete = '查询结果无法匹配到字段{}.{}.{}信息,请人工排查数据准确性,删除结束'.format(self.app,
self.namespaces, key)
else:
result_delete = '数据删除失败,未查询到namespaces下字段信息或{}.{}信息查询失败或,请人工排查确认'.format(self.app, self.namespaces)
return result_flag, result_delete
def search_msg(self, ifText=False, ifEval=False):
"""
查询app.namespaces字段信息
:return:
"""
url_search = "http://{addr}/apps/{apps}/envs/{env}/clusters/{clusters}/namespaces/{namespaces}/items".format(
addr=self.addr,
apps=self.app,
env=self.env,
clusters=self.clusters,
namespaces=self.namespaces
)
response = self.session.get(url_search, headers=self.header)
json_data = response.json()
text = response.text
flag = True if response.status_code == 200 else False
if ifEval:
return flag, eval(text)
elif ifText:
return flag, text
else:
return flag, json_data
def release_msg(self, releaseTitle=""):
"""
版本发布
:return:
"""
url_release = "http://{addr}/apps/{apps}/envs/{env}/clusters/{clusters}/namespaces/{namespaces}/releases".format(
addr=self.addr,
apps=self.app,
env=self.env,
clusters=self.clusters,
namespaces=self.namespaces)
if not releaseTitle:
releaseTitle = datetime.datetime.now().strftime("%Y%m%d%H%M%S_release_by_automation")
release_data = {
"releaseTitle": releaseTitle,
"releaseComment": "",
"isEmergencyPublish": False
}
response = self.session.post(url_release, data=json.dumps(release_data), headers=self.header)
flag = True if response.status_code == 200 else False
return flag, releaseTitle
def get_releases_msg(self, page=0, size=10000, release_title='', get_all=False):
"""
查询发布版本信息
:param page: 查询分页页码
:param size: 查询分页页面数据量
:param release_title: 发布版本名称,有值执行匹配,物质返回最新的发布版本id数据信息
:return:
"""
url = ("http://{addr}/apps/{apps}/envs/{env}/clusters/{clusters}/namespaces/{namespaces}/releases/active?"
"page={page}&size={size}".format(addr=self.addr,
apps=self.app,
env=self.env,
clusters=self.clusters,
namespaces=self.namespaces,
page=page,
size=size))
response = self.session.get(url, headers=self.header)
json_data = response.json()
result_flag, result_dict = False, {}
if response.status_code != 200:  # the query request failed
print("release query request failed, code: {}".format(response.status_code), response.text)
else:
if release_title:  # a release name was given, return the matching data
for json_d in json_data:
name = json_d.get("name", '')
if name == release_title:
result_flag, result_dict = True, json_d
break
elif get_all:
result_flag, result_dict = True, json_data
else:
result_flag, result_dict = True, json_data[0] if json_data else {}
return result_flag, result_dict
def compare_release_msg(self, first_release_id, last_release_id):
"""
比较发布前后数据变更详情
:param first_release_id: 上一个发布版本的id信息
:param last_release_id: 后一个发布版本的id信息
:return:
"""
url = ('http://{addr}/envs/{env}/releases/compare?baseReleaseId={first_release_id}&'
'toCompareReleaseId={last_release_id}'.format(addr=self.addr,
apps=self.app,
env=self.env,
first_release_id=first_release_id,
last_release_id=last_release_id))
response = self.session.get(url, headers=self.header)
json_data = response.json()
code, text = response.status_code, response.text
result_flag = True if code == 200 else False
if not result_flag:
print("release compare request failed. {}".format(text))
return json_data
def rollback_release(self, release_dict):
"""
接口实现发布的版本回滚
:param kwargs:
:return:
"""
# release_flag, release_dict = self.get_releases_msg(release_title=release_title)
release_id = release_dict.get("id", "")
release_title = release_dict.get("name", "")
if not release_id:
print("回滚版本id数据获取失败,取消数据回滚")
sys.exit(-1)
url = "http://{addr}/envs/{env}/releases/{release_id}/rollback".format(addr=self.addr, env=self.env,
release_id=release_id)
print("开始回滚版本name:{},id:{},url:{}".format(release_title, release_id, url))
response = self.session.put(url, headers=self.header)
if response.status_code != 200: # 数据查询请求执行失败
print("数据删除请求执行错误,code:{},text:{}".format(response.status_code, response.text))
sys.exit(-1)
else:
print("回滚版本请求发送执行成功,code:{},text:{}".format(response.status_code, response.text))
return True, "版本回滚执行成功"
|
ApolloInterface
|
/ApolloInterface-0.0.2.tar.gz/ApolloInterface-0.0.2/ApolloItemsCRUD/apolloInterface.py
|
apolloInterface.py
|
import requests
import pandas as pd
import concurrent.futures
from tqdm import tqdm
import math
class APIParser:
def __init__(self, api_key, league_id):
self.league_id = league_id
self.api_key = api_key
self.session = requests.Session()
self.headers = {
"X-RapidAPI-Key": self.api_key,
"X-RapidAPI-Host": "api-football-v1.p.rapidapi.com"
}
self.season_list = [str(i) for i in self.get_all_seasons()['year'].to_list()]
self.figure_list = self.get_all_fixtures_info().fixture_id.to_list()
self.country_list = self.get_all_countries().name.unique().tolist()
def replace_period(self, df):
df.columns = df.columns.str.replace('.', '_', regex=False)
return df
def get_response_df(self, url, querystring=None, iter_list=None):
""" This is a general method to get single response"""
if querystring is None and iter_list is None:
response = requests.get(url, headers=self.headers)
else:
response = requests.get(url, headers=self.headers, params=querystring)
response_dic = response.json()['response']
df_response = pd.json_normalize(response_dic)
return df_response
def json_decomposer(self, df, left_cols, right_cols):
frames = []
data_range = range(len(df))
# Conditionally display progress bar
if right_cols not in ['transfers','career']:
data_range = tqdm(data_range)
for i in data_range:
left = df.iloc[[i]][left_cols].reset_index(drop='index')
# Check if it's not an empty list and it's a list of dictionaries
if df[right_cols][i] and (isinstance(df[right_cols][i], list) and all(isinstance(x, dict) for x in df[right_cols][i])):
right = pd.json_normalize(df[right_cols][i])
for _ in range(len(right) - 1):
left = pd.concat([left, left.loc[[0]]], ignore_index=True)
df_combined = pd.concat([left, right], axis=1)
frames.append(df_combined)
if frames:
df_final = pd.concat(frames).reset_index(drop=True)
df_final.columns = df_final.columns.str.replace('.', '_', regex=False)
return df_final
else:
print("No data to decompose")
return None
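# Illustration (not from the original source): given a row with
# fixture_id=1 and lineups=[{"team_id": 10}, {"team_id": 20}],
# json_decomposer(df, ["fixture_id"], "lineups") repeats the left-hand
# columns once per dict and yields two rows: (1, 10) and (1, 20).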
def get_all_countries(self):
url = "https://api-football-v1.p.rapidapi.com/v3/countries"
response = requests.get(url, headers=self.headers)
response_dic = response.json()['response']
get_all_countries = pd.json_normalize(response_dic)
return get_all_countries
def get_all_venues(self):
url = "https://api-football-v1.p.rapidapi.com/v3/venues"
frames = []
for i in self.country_list:
querystring = {"country": i}
df_temp = self.get_response_df(url=url, querystring=querystring)
frames.append(df_temp)
df_all_venues = pd.concat(frames).reset_index(drop=True)
return df_all_venues
def get_all_seasons(self):
"""get all seasons for a league"""
url = "https://api-football-v1.p.rapidapi.com/v3/leagues"
querystring = {"id": self.league_id}
df_seasons = self.get_response_df(url=url, querystring = querystring)
frames = []
for _ , row in df_seasons.iterrows():
# Access the list of dictionaries in 'seasons' column
list_of_season_dicts = row['seasons']
# Convert the list of dictionaries to a DataFrame
df_season = pd.json_normalize(list_of_season_dicts)
# Copy other columns from the row to the new DataFrame
for col in df_seasons.columns:
if col != 'seasons':
df_season[col] = row[col]
# Append the new DataFrame to the list
frames.append(df_season)
df_all_seasons = pd.concat(frames).reset_index(drop=True)
df_all_seasons = self.replace_period(df_all_seasons)
return df_all_seasons
def get_all_teams(self):
frames = []
url = "https://api-football-v1.p.rapidapi.com/v3/teams"
total_seasons = len(self.season_list)
# Create a progress bar
progress_bar = tqdm(total=total_seasons, desc="Fetching teams", unit="season")
for i in self.season_list:
querystring = {"league": self.league_id, "season": i}
df_temp = self.get_response_df(url=url, querystring=querystring)
df_temp.insert(0, 'season', int(i))
df_temp.insert(0, 'league_id', self.league_id)
frames.append(df_temp)
# Update the progress bar
progress_bar.update(1)
# Close the progress bar
progress_bar.close()
df_teams_info = pd.concat(frames).reset_index(drop=True)
df_teams_info = self.replace_period(df_teams_info)
return df_teams_info
def get_all_fixtures_info(self):
frames = []
url = "https://api-football-v1.p.rapidapi.com/v3/fixtures"
for i in self.season_list:
querystring = {"league": self.league_id, "season": i}
df_temp = self.get_response_df(url=url, querystring=querystring)
df_temp.insert(0, 'season', int(i))
df_temp.insert(0, 'league_id', self.league_id)
frames.append(df_temp)
df_fixtures_info = pd.concat(frames).reset_index(drop=True)
df_fixtures_info = self.replace_period(df_fixtures_info)
return df_fixtures_info
def get_df_stats_raw(self):
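# Split the fixture ids into dash-joined batches of 20; the per-request
# id limit is an assumption inferred from how breakdown_list is built below.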
breakdown_list = ['-'.join(map(str, self.figure_list[i:i+20]))
for i in range(0, len(self.figure_list), 20)]
df_stats = pd.DataFrame()
def process_fixtures(fixtures):
url = "https://api-football-v1.p.rapidapi.com/v3/fixtures"
querystring = {"ids": fixtures}
response = self.session.get(
url, headers=self.headers, params=querystring)
response_dic = response.json()['response']
df_temp = pd.json_normalize(response_dic)
df_temp.columns = df_temp.columns.str.replace(
'.', '_', regex=False)
return df_temp[['fixture_id', 'fixture_date', 'events', 'lineups', 'statistics', 'players']]
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(process_fixtures, fixtures)
for fixtures in breakdown_list]
# Create a progress bar
progress_bar = tqdm(total=len(futures), desc="Processing raw fixtures")
for future in concurrent.futures.as_completed(futures):
df_temp = future.result()
df_stats = pd.concat([df_stats, df_temp], ignore_index=True)
# Update the progress bar
progress_bar.update(1)
progress_bar.close()
return df_stats
def get_all_lineups_general_stats(self):
df_stats = self.get_df_stats_raw()
all_lineups_raw_stats = self.json_decomposer(
df_stats, ['fixture_id', 'fixture_date'], 'lineups')
all_lineups_general_stats = all_lineups_raw_stats[[
i for i in all_lineups_raw_stats.columns if i not in ['startXI', 'substitutes']]]
return all_lineups_general_stats
def all_lineups_start_stats(self):
lineups_frames = []
df_stats = self.get_df_stats_raw()
df_lineups = self.json_decomposer(df_stats, ['fixture_id','fixture_date'], 'lineups')
lineups_frames.append(df_lineups)
all_lineups_raw_stats = pd.concat(lineups_frames).reset_index(drop=True)
l = ['fixture_id', 'fixture_date', 'formation', 'team_id', 'team_name']
r = 'startXI'
all_lineups_start_stats = self.json_decomposer(all_lineups_raw_stats, l, r)
return all_lineups_start_stats
def all_lineups_substitutes_stats(self):
lineups_frames = []
df_stats = self.get_df_stats_raw()
df_lineups = self.json_decomposer(df_stats, ['fixture_id','fixture_date'], 'lineups')
lineups_frames.append(df_lineups)
all_lineups_raw_stats = pd.concat(lineups_frames).reset_index(drop=True)
l = ['fixture_id', 'fixture_date', 'formation', 'team_id', 'team_name']
r = 'substitutes'
all_lineups_substitutes_stats = self.json_decomposer(all_lineups_raw_stats, l, r)
return all_lineups_substitutes_stats
def get_all_injuries(self):
frames = []
url = "https://api-football-v1.p.rapidapi.com/v3/injuries"
total_seasons = len(self.season_list)
# Create a progress bar
progress_bar = tqdm(total=total_seasons, desc="Fetching injuries", unit="season")
for i in self.season_list:
querystring = {"league": self.league_id, "season": i}
# Send the request
response = requests.get(url, headers=self.headers, params=querystring)
df_temp = self.replace_period(pd.json_normalize(response.json()['response']))
df_temp.insert(0, 'season', int(i))
frames.append(df_temp)
# Update the progress bar
progress_bar.update(1)
# Close the progress bar
progress_bar.close()
df_injuries = pd.concat(frames).reset_index(drop=True)
return df_injuries
def get_all_transfers(self, players_list):
frames = []
# Create a progress bar
progress_bar = tqdm(players_list, desc="Processing players")
for player_id in progress_bar:
url = "https://api-football-v1.p.rapidapi.com/v3/transfers"
querystring = {"player": str(player_id)}
# Send the request
response = requests.get(url, headers=self.headers, params=querystring)
response_dic = response.json()['response']
df_temp = self.replace_period(pd.json_normalize(response_dic))
df_temp1 = self.json_decomposer(df_temp, ['player_id', 'player_name'], 'transfers')
if df_temp1 is not None:
frames.append(df_temp1)
progress_bar.close()
# Concatenate all the frames
if frames:
df_transfers = pd.concat(frames).reset_index(drop=True)
else:
print("No data to concatenate")
df_transfers = pd.DataFrame()
return df_transfers
def get_all_players(self):
def fetch_player_data(season, page_number):
params = {"league": self.league_id,
"season": season, "page": str(page_number)}
response = self.session.get(
'https://api-football-v1.p.rapidapi.com/v3/players', params=params)
return pd.DataFrame(response.json()['response']) if response.status_code == 200 else None
self.session = requests.Session()
self.session.headers.update({
"X-RapidAPI-Key": api_key,
"X-RapidAPI-Host": "api-football-v1.p.rapidapi.com"
})
player_frames = []
for season in self.season_list:
params = {"league": self.league_id, "season": season, "page": "1"}
total_pages = self.session.get(
'https://api-football-v1.p.rapidapi.com/v3/players', params=params).json()['paging']['total']
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
fetch_job = [executor.submit(
fetch_player_data, season, i) for i in range(1, total_pages + 1)]
data_frames = [f.result() for f in tqdm(concurrent.futures.as_completed(
fetch_job), total=total_pages, desc=f"Processing season {season}")]
data_frames = [df for df in data_frames if df is not None]
if data_frames:
df_all_players = pd.concat(data_frames).reset_index(drop=True)
frames = [pd.concat([pd.json_normalize(df_all_players['player'].iloc[i]), pd.json_normalize(
df_all_players['statistics'].iloc[i])], axis=1) for i in range(len(df_all_players))]
df_all_players_xxxx = pd.concat(frames).reset_index(drop=True)
df_all_players_xxxx.columns = df_all_players_xxxx.columns.str.replace(
'.', '_', regex=False)
df_all_players_xxxx.insert(0, 'season', int(season))
player_frames.append(df_all_players_xxxx)
final_df = pd.concat(player_frames).reset_index(drop=True)
# Drop rows with null 'id' values and convert 'id' column to string
final_df = final_df.dropna(subset=['id'])
final_df['id'] = final_df['id'].values.astype(int).astype(str)
return final_df
def get_all_sidelined(self, players_list):
frames = []
session = requests.Session()
session.headers.update({
"X-RapidAPI-Key": api_key,
"X-RapidAPI-Host": "api-football-v1.p.rapidapi.com"
})
for i in tqdm(players_list, desc="Processing players"):
try:
response = session.get(
"https://api-football-v1.p.rapidapi.com/v3/sidelined", params={"player": str(i)})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
print(f"HTTP error occurred for player_id: {i} - {err}")
else:
response_dic = response.json()['response']
df_temp = pd.json_normalize(response_dic)
df_temp.insert(0, 'player_id', str(i))
frames.append(df_temp)
if frames:
df_sidelined = pd.concat(frames).reset_index(drop=True)
else:
print("No data to concatenate")
df_sidelined = pd.DataFrame()
return df_sidelined
def get_all_coaches(self, coach_list, max_workers=3):
'''here is how to get coach_list:
lineups = parser.get_all_lineups_general_stats()
coach_list = [str(int(i)) for i in lineups.coach_id.unique().tolist() if not math.isnan(i)]
'''
def fetch_data(i):
df_temp = self.get_response_df(url= "https://api-football-v1.p.rapidapi.com/v3/coachs", querystring={"id": str(i)})
df_temp1 = self.json_decomposer(df_temp, [i for i in df_temp.columns if i != 'career'], 'career')
return df_temp1
frames = []
# Adjust max_workers as necessary
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
df_coach = {executor.submit(fetch_data, i): i for i in coach_list}
for future in tqdm(concurrent.futures.as_completed(df_coach), total=len(coach_list), desc="Processing coaches"):
df_temp = future.result()
if df_temp is not None:
frames.append(df_temp)
if frames:
df_coaches = pd.concat(frames).reset_index(drop=True)
else:
print("No data to concatenate")
return None
columns_to_rename = {'team_id': 'career_team_id','team_name': 'career_team_name','team_logo': 'career_team_logo'}
df_r = df_coaches.iloc[:, -3:].rename(columns=columns_to_rename)
df_l = df_coaches.iloc[:, :-3]
df_coaches = pd.concat([df_l, df_r], axis=1)
df_coaches['career_team_id'] = df_coaches['career_team_id'].values.astype(int).astype(str)
return df_coaches
|
ApolloV2-Api-Parser
|
/ApolloV2_Api_Parser-0.1-py3-none-any.whl/package_dir/apollov2_api_parser.py
|
apollov2_api_parser.py
|
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
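# Typical bootstrap usage of use_setuptools(), placed at the top of a
# project's setup.py before anything from setuptools is imported:
#
#     from ez_setup import use_setuptools
#     use_setuptools()
#     from setuptools import setup
#     setup(...)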
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
AppDispatch
|
/AppDispatch-0.3.4.tar.gz/AppDispatch-0.3.4/ez_setup.py
|
ez_setup.py
|
import os
import sys
import logging
from bn import AttributeDict, absimport, uniform_path
from pipestack.pipe import Pipe, DispatchMarble, Marble
from pipestack.app import PipeError
from urlconvert import extract_url_parts, RuleSet
from conversionkit import Field
log = logging.getLogger(__name__)
def validPath():
def validPath_converter(conversion, state):
        if not (conversion.value.startswith('/') and conversion.value.endswith('/')):
            conversion.error = 'Paths should start and end with a \'/\' character'
else:
conversion.result = conversion.value
return validPath_converter
class BaseArea(object):
pass
class URLMarble(DispatchMarble):
def __init__(
marble,
bag,
name,
config,
aliases=None,
default_aliases=None,
persistent_state=None,
flow_state=None
):
DispatchMarble.__init__(
marble,
bag,
name,
config,
aliases,
default_aliases,
persistent_state,
flow_state,
)
if not hasattr(marble, 'urls'):
raise Exception(
'%r marble has no \'urls\' attribute specified'%(
marble.__class__.__name__,
)
)
else:
marble.ruleset = RuleSet(marble.urls)
marble.log = persistent_state['log']
#marble.path = persistent_state['path']
marble._area_cache = persistent_state['_area_cache']
marble.dispatch()
def url(marble, **p):
try:
marble.apps
except PipeError:
uses_apps = False
else:
uses_apps = True
if 'pipe' in p and uses_apps:
# Use the global version which will call this one
return marble.apps.url(**p)
else:
conversion = marble.ruleset.generate_url(p, marble.url_parts)
return conversion.result.url
def redirect(marble, **p):
url = marble.url(**p)
# @@@ Could put a check in here for redirecting back to current URL.
        marble.bag.http_response.status = '301 Moved Permanently'
marble.bag.http_response.header_list.append(
dict(
name='Location',
value=url.encode('utf8')
)
)
marble.bag.http_response.body = ['Redirecting...']
return 'Redirecting...'
def dispatch(marble):
try:
marble.apps
except PipeError:
uses_apps = False
else:
uses_apps = True
if uses_apps and not marble.apps.dispatch:
# We just want the marble created for URL-generating purposes, not dispatched
return
url_parts = extract_url_parts(marble.bag)
log.info('Extracted URL Parts: %r', url_parts)
if not url_parts['path'].startswith(marble.config.path[1:]):
return
else:
# Modify script_name and path_info for this app
# (REMOVING and external SCRIPT_NAME)
log.info(
(
"Script parts - url_parts script: %r, "
"environ SCRIPT_NAME: %r, url_parts path: %r"
),
url_parts['script'],
marble.environ.get('SCRIPT_NAME', ''),
marble.config.path,
)
url_parts['script'] = url_parts['script'][\
len(marble.environ.get('SCRIPT_NAME', '')):]\
+ marble.config.path.strip('/')
url_parts['path'] = url_parts['path'][len(marble.config.path)-1:]
log.info('Modified URL Parts: %r', url_parts)
conversion = marble.ruleset.match(url_parts=url_parts)
if not conversion.successful:
# Not for this URL.
log.info(
'Could not route %r to an action, no URL vars found',
marble.environ['PATH_INFO'],
)
return
res = conversion.result
if res.extra and res.extra.has_key('handler'):
handler = res.extra['handler']
log.debug(
'Using the handler %r specified as an extra arg in the URLs '
'to process the request',
handler,
)
handler(marble.bag, match=res)
marble.bag.interrupt_flow()
return
vars = res.vars
if not vars.get('area'):
log.info(
'Could not route %r to an action, no area specified',
marble.environ['PATH_INFO'],
)
return
else:
area_name = vars.get('area')
if area_name in ['action', 'area']:
raise Exception(
'You cannot name an area %r, this is a reserved word'%(
area_name,
)
)
# At this point we have our area. Let's get the left over path
area = marble._area_cache.get(area_name)
if not area:
app_dir = os.path.dirname(sys.modules[marble.__class__.__module__].__file__)
if res.extra and res.extra.has_key('area_class'):
v = vars.copy()
v['camel_area'] = area_name.capitalize()
area_class = res.extra['area_class']%v
else:
area_class = None
if os.path.exists(
os.path.join(
app_dir,
'area',
area_name+'.py'
)
):
if sys.modules[marble.__class__.__module__].__file__.split('/')[-1].split('.')[-2] == '__init__':
mod_path = marble.__class__.__module__+'.area.%s'
else:
mod_path = ('.'.join(marble.__class__.__module__.split('.')[:-1]))+'.area.%s'
elif os.path.exists(
os.path.join(
app_dir,
area_name+'.py'
)
):
if sys.modules[marble.__class__.__module__].__file__.split('/')[-1].split('.')[-2] == '__init__':
mod_path = marble.__class__.__module__+'.%s'
else:
mod_path = ('.'.join(marble.__class__.__module__.split('.')[:-1]))+'.%s'
elif os.path.exists(
os.path.join(
app_dir,
'area',
area_name,
'action.py'
)
):
if sys.modules[marble.__class__.__module__].__file__.split('/')[-1].split('.')[-2] == '__init__':
mod_path = marble.__class__.__module__+'.area.%s.action'
else:
            mod_path = ('.'.join(marble.__class__.__module__.split('.')[:-1]))+'.area.%s.action'
else:
log.info(
'The Python module in %r for area %r matched from %r '
'does not exist ',
app_dir, area_name,
marble.environ['PATH_INFO'],
)
return
# We support two types of area: modules and classes.
if area_class:
area = getattr(
absimport(mod_path%area_name),
area_class,
)
log.info(
'Loading %r app \'%s.%s\' area into cache',
marble.name,
area_name,
area_class,
)
marble._area_cache[area_name] = area()
else:
mod_name = mod_path%area_name
marble._area_cache[area_name] = absimport(mod_name)
# Now set up the marble
marble.vars = AttributeDict(vars)
marble.url_parts = url_parts
marble.area = area = marble._area_cache[area_name]
log.debug('Vars matched: %r', vars)
if not vars.get('action') and not vars.get('api'):
log.info(
'Could not route %r to an action or API call, no action or '
'api specified by matched rule',
marble.environ['PATH_INFO'],
)
return
elif vars.get('action'):
action_name = 'action_'+vars['action']
if not hasattr(area, action_name):
log.info(
'No such action %r in area %r matched from %r ',
action_name,
area_name,
marble.environ['PATH_INFO'],
)
return
else:
action = getattr(area, action_name)
result = action(marble)
if result is None:
if marble.http_response.body:
log.info(
'No response from action but body present so '
'not raising an error'
)
else:
raise Exception(
'No response from action %r'%action_name
)
else:
if isinstance(result, (unicode, str)):
marble.http_response.body = [result]
else:
marble.http_response.body = result
marble.bag.interrupt_flow()
class URLPipe(Pipe):
"""\
An ``AppPipe`` is a specical type of marbles in that in addition to
configuration and an optional marble, it also takes an ``App`` object.
As well as being an ordinary marbles which is placed in the marblesline and
dispatches to the ``App`` if the URL path matches the name of the app, an
``AppPipe`` can also be used outside of the marblesline. In this case an
``AppDispatch()`` marbles must be in the marblesline and it will dispatch to
the app based on the location of an ``index.app`` file in the static directory
whose contents corresponds to the application name.
"""
default_aliases = dict(
resolve='resolve',
static='static',
environ='environ',
http_response='http_response',
apps='apps',
)
marble_class = URLMarble
options = dict(
path = Field(
validPath(),
missing_or_empty_error = 'Please specify %(name)s.path'
)
)
def __init__(marble, bag, name, aliases=None, **pextras):
Pipe.__init__(marble, bag, name, aliases, **pextras)
marble.persistent_state=dict(
_area_cache = {},
log=log,
)
def enter(self, bag):
Pipe.enter(self, bag)
BaseApp = URLPipe
AppMarble = URLMarble
class URLDispatchPipe(Pipe):
class marble_class(Marble):
dispatch = True
url_parts = None
def url(self, **args):
            if 'pipe' not in args:
                raise Exception("Expected a 'pipe' argument to url()")
elif args['pipe'] not in self.persistent_state['apps']:
# You can't just enter the pipe because a dispatch marble will
# also dispatch so you'll probably get an infinte loop as the
# handler tries to generate URLs. Instead we set this variable
# which the dispatch marble will check to see if it is
# supposed to dispatch or not.
self.dispatch = False
if not self.bag.has_key(args['pipe']):
self.bag.enter(args['pipe'])
self.dispatch = True
# We should be able to find its name, path and ruleset now
name = self.bag[args['pipe']].name
if name != args['pipe']:
raise Exception('What? The name of the pipe being loaded and the name asked for are different!')
path = self.bag[args['pipe']].config.path
ruleset = self.bag[args['pipe']].ruleset
if name in self.persistent_state['apps']:
                raise Exception('An app named %r is already registered with URLManager' % name)
for k, v in self.persistent_state['apps'].items():
if v.path == path:
raise Exception('The path %r specified for %r had already been registered with URLManager by %r'%(path, name, k))
# Since this shouldn't change, we cache it here
self.persistent_state['apps'][args['pipe']] = AttributeDict(path=path, ruleset=ruleset)
if not self.url_parts:
self.url_parts = extract_url_parts(self.bag)
ruleset = self.persistent_state['apps'][args['pipe']].ruleset
del args['pipe']
conversion = ruleset.generate_url(args, self.url_parts)
return conversion.result.url
default_aliases = dict(environ='environ')
def create(self, bag):
self.persistent_state = {
'plugins': bag.app.fetch_plugins(self.name, 'app'),
'apps': {},
}
|
AppDispatch
|
/AppDispatch-0.3.4.tar.gz/AppDispatch-0.3.4/appdispatch/__init__.py
|
__init__.py
|
============================
AppDynamics REST API Library
============================
Current version: 0.4.22
Introduction
------------
AppDynamicsRESTx is a library that provides a clean Python interface to the
REST API of an AppDynamics controller.
AppDynamicsRESTx is developed using Python 2.7.6 on Mac OSX. It is known to
work on most Linux distributions and on Windows, with your choice of Python 2.6, 2.7,
3.3, 3.4, 3.5, 3.6, 3.7, or 3.8.
Installation
------------
Install via ``pip``::
$ pip install AppDynamicsRESTx
Install from source::
$ git clone https://github.com/homedepot/AppDynamicsRESTx.git
$ cd AppDynamicsRESTx
$ python setup.py install
Prerequisites
-------------
* `requests <https://pypi.python.org/pypi/requests>`_
* `argparse <https://pypi.python.org/pypi/argparse>`_
* `nose <https://pypi.python.org/pypi/nose>`_ (for running unit tests)
* `tzlocal <https://pypi.python.org/pypi/tzlocal>`_ and
`lxml <https://pypi.python.org/pypi/lxml>`_ (used by some of the example scripts)
* `jinja2 <https://pypi.python.org/pypi/jinja2>`_ (used by the audit report example)
Documentation
-------------
The documentation is hosted online at readthedocs.org_.
A Quick Example
---------------
Here's a simple example that retrieves a list of business applications
from a controller on localhost, and prints them out:
.. code:: python
from appd.request import AppDynamicsClient
 c = AppDynamicsClient('http://localhost:8090', 'user1', 'password', 'customer1', debug=True)
for app in c.get_applications():
     print(app.name, app.id)
Testing
-------
If you have cloned the repo, you can run the unit tests from ``setup.py``::
python setup.py test
Or, if you have ``nose`` installed, you can use that::
nosetests
For More Information
--------------------
The main source repo is on Github_.
.. _AppDynamics: http://www.appdynamics.com/
.. _Github: https://github.com/homedepot/AppDynamicsRESTx
.. _readthedocs.org: http://AppDynamicsREST.readthedocs.org/en/latest/
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/README.rst
|
README.rst
|
Contributing to AppDynamics Extensions
======================================
If you would like to contribute code you can do so through GitHub by
forking the repository and sending a pull request (on a branch other
than ``master`` or ``gh-pages``).
When submitting code, please make every effort to follow existing
conventions and style in order to keep the code as consistent as
possible.
License
-------
By contributing your code, you agree to license your contribution under
the terms of the APLv2:
https://github.com/extensions-commons/blob/master/LICENSE
All files are released with the Apache 2.0 license.
If you are adding a new file it should include a copyright header::
/**
* Copyright 2013 AppDynamics, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/CONTRIBUTING.rst
|
CONTRIBUTING.rst
|
## FAQ
**I get errors like `ImportError: No module named appd.cmdline` when I try to run the examples scripts.**
You'll see this if you try to run the example scripts before installing the package into your Python `site-packages`
folder. Either follow the installation instructions above, or set the `PYTHONPATH` environment variable before
running the script, like this:
``` bash
PYTHONPATH=. python examples/bt_metrics.py
```
**I can't seem to get the authentication right. I keep getting `HTTPError: 401 Client Error: Unauthorized`.**
Use the same username, password, and account you use when you log into your controller. If your login screen
only has two fields in it (username and password), then you can omit the account.
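For reference, here is a minimal sketch of what that looks like in code (the controller URL is a
placeholder; the constructor arguments follow the quick example in the README, and the account is the
part you can omit on single-tenant controllers, where it defaults to `customer1`):
``` python
from appd.request import AppDynamicsClient

# Multi-tenant controller: pass the account name explicitly.
c = AppDynamicsClient('http://controller:8090', 'user1', 'password', 'customer1')

# Single-tenant controller (two-field login screen): omit the account;
# it defaults to 'customer1'.
c = AppDynamicsClient('http://controller:8090', 'user1', 'password')
```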
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/docs/faq.rst
|
faq.rst
|
.. AppDynamicsREST documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to AppDynamics REST Library's documentation!
====================================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
contributing
authors
history
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/docs/index.rst
|
index.rst
|
## Command Line Options
This package includes a module called `appd.cmdline` that provides a simple command-line parser for use
in your scripts. You're not required to use it, but it allows you to point your script at different controllers
without making any code changes, and if you use it consistently, your scripts will all have a common
command-line syntax, which is nice. It supports the following options:
- **-c** or **--url** for the controller URL. Required.
- **-a** or **--account** for the account name. Optional and defaults to "customer1", which is the account
name on single-tenant controllers.
- **-u** or **--username** for the user name. Required.
- **-p** or **--password** for the password. Required.
- **-v** or **--verbose** will print out the URLs before they are retrieved.
- **-h** or **--help** will display a summary of the command line options.
The example scripts all use the parser, so you can look at their source to see how to use it.
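For instance, the boilerplate shared by the example scripts looks like this:
``` python
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient

# parse_argv() supplies the options listed above (-c/--url, -a/--account,
# -u/--username, -p/--password, -v/--verbose).
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
```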
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/docs/api/cmdline.rst
|
cmdline.rst
|
from __future__ import print_function
import itertools
import csv
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
node_type = node.type
if node.has_machine_agent and not node.has_app_agent:
node.type = 'Machine Agent'
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: (x.machine_id, x.type)
nodes.sort(key=group_func)
tier_names = set()
tiers = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
agent_type = nodes_on_machine[0].type
tier_name = nodes_on_machine[0].tier_name.split('.')[0]
license_count = len(nodes_on_machine)
if 'PHP' in agent_type:
agent_type = 'PHP'
    elif 'IIS' in agent_type:
agent_type = 'DOT_NET'
license_count = 1
elif agent_type == 'Machine Agent':
agent_type = 'MACHINE'
license_count = 1
assert len(nodes_on_machine) == 1
else:
agent_type = 'JAVA'
def key(name):
return '%s|%s|%s' % (tier_name, agent_type, name)
def incr(name, amt=1):
k = key(name)
tiers[k] = tiers.get(k, 0) + amt
incr('licenses', license_count)
incr('agents', len(nodes_on_machine))
tiers.setdefault(key('host_set'), set()).add(machine_id[0])
tiers[key('hosts')] = len(tiers[key('host_set')])
tier_names.add(tier_name)
AGENT_TYPES = ('JAVA', 'DOT_NET', 'PHP', 'MACHINE')
with open('data/licenses.csv', 'w') as f:
w = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC, dialect=csv.excel)
hdr = ['Tier Name', 'Java Licenses', 'Java Agents', 'Java Hosts', '.NET Licenses', '.NET Agents', '.NET Hosts',
'PHP Licenses', 'PHP Agents', 'PHP Hosts', 'Machine Agent Licenses', 'Machine Agents', 'Machine Agent Hosts']
w.writerow(hdr)
for tier_name in sorted(tier_names):
row = [tier_name]
for agent_type in AGENT_TYPES:
def get(name):
return tiers.get('%s|%s|%s' % (tier_name, agent_type, name), 0)
row.extend([get('licenses'), get('agents'), get('hosts')])
w.writerow(row)
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/license_report_to_csv.py
|
license_report_to_csv.py
|
from __future__ import print_function
from collections import defaultdict
from datetime import datetime
from time import mktime
from lxml.builder import ElementMaker
from lxml import etree
import tzlocal
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
# The report will generate data for the 24-hour period before midnight of the current day. To change the
# reporting period, adjust these variables.
time_in_mins = 24 * 60
end_time = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
end_epoch = int(mktime(end_time.timetuple())) * 1000
# Metric mapper
#
# Uses a hash table to call the function appropriate for specific metric types, e.g.: calls map_art for
# Average Response Time, map_cpm for Calls per Minute, etc.
def map_art(d, metric_total, metric_average, metric_sum, max_point):
d['art'] = metric_average
def map_cpm(d, metric_total, metric_average, metric_sum, max_point):
d['total_calls'] = metric_total
d['cpm_average'] = metric_average
d['cpm_max'] = max_point[0]
d['cpm_max_time'] = max_point[1]
def map_err(d, metric_total, metric_average, metric_sum, max_point):
d['total_errors'] = metric_total
d['epm_avg'] = metric_average
d['epm_max'] = max_point[0]
d['epm_max_time'] = max_point[1]
def no_such_metric(d, metric_total, metric_average, metric_sum, max_point):
raise ValueError("no such metric")
METRIC_DISPATCHER = {
'Average Response Time (ms)': map_art,
'Calls per Minute': map_cpm,
'Errors per Minute': map_err,
'DEFAULT': no_such_metric
}
# Helper functions
def now_rfc3339():
return datetime.now(tzlocal.get_localzone()).isoformat('T')
def freq_to_mins(md):
FREQ_MAP = {'ONE_MIN': 1, 'TEN_MIN': 10, 'SIXTY_MIN': 60}
return FREQ_MAP[md.frequency]
# Parse command line arguments and create AD client:
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
# Get the list of configured apps, and get backend metrics for each one:
rows = defaultdict(dict)
for app in c.get_applications():
for md in c.get_metrics('Backends|*|*', app.id, time_range_type='BEFORE_TIME', end_time=end_epoch,
duration_in_mins=time_in_mins, rollup=False):
# Get the last two components of the metric path. This should be 'backend_name|metric_name'.
backend_name, metric_name = md.path.split('|')[-2:]
if 'Discovered backend call' in backend_name:
backend_name = backend_name[26:]
metric_sum = sum([x.value for x in md.values])
metric_total = metric_sum * freq_to_mins(md)
metric_average = 0
max_point = (0, None)
if len(md.values) > 0:
metric_average = metric_sum / len(md.values)
max_point = max([(max(x.value, x.current, x.max), x.start_time) for x in md.values])
func = METRIC_DISPATCHER.get(metric_name, None)
if func:
func(rows[backend_name], metric_total, metric_average, metric_sum, max_point)
rows[backend_name]['app'] = app.name
# Generate the report.
XSI = 'http://www.w3.org/2001/XMLSchema-instance'
E = ElementMaker(nsmap={'xsi': XSI})
root = E.BackendResponseTimes(Controller=c.base_url, GenerationTime=now_rfc3339())
root.set('{%s}noNamespaceSchemaLocation' % XSI, 'backend_metrics.xsd')
for k, v in sorted(rows.items()):
v.setdefault('cpm_max_time', '')
v.setdefault('epm_max_time', '')
root.append(E.Backend(
E.ApplicationName(v['app']),
E.BackendName(k),
E.AverageResponseTime(str(v.get('art', 0))),
E.CallsPerMinute(str(v.get('cpm_average', 0))),
E.TotalCalls(str(v.get('total_calls', 0))),
E.MaximumCallsPerMinute(str(v.get('cpm_max', 0))),
E.MaximumCallTime(v['cpm_max_time'].isoformat('T') if v['cpm_max_time'] else ''),
E.ErrorsPerMinute(str(v.get('epm_avg', 0))),
E.TotalErrors(str(v.get('total_errors', 0))),
E.MaximumErrorsPerMinute(str(v.get('epm_max', 0))),
E.MaximumErrorTime(v['epm_max_time'].isoformat('T') if v['epm_max_time'] else ''),
))
# Print the report to stdout.
print(etree.ProcessingInstruction('xml', 'version="1.0" encoding="UTF-8"'))
print(etree.tostring(root, pretty_print=True, encoding='UTF-8'))
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/backend_metrics.py
|
backend_metrics.py
|
from __future__ import print_function
from datetime import datetime
import itertools
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
def incr(d, name, amt=1):
d[name] = d.get(name, 0) + amt
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
if node.has_machine_agent or node.has_app_agent:
if node.has_app_agent:
if 'PHP' in node.type:
node.group_type = 'PHP App Agent'
                elif 'IIS' in node.type:
node.group_type = '.NET App Agent'
else:
node.group_type = 'Java App Agent'
else:
node.group_type = 'Machine Agent only'
node.app = app
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: x.machine_id
nodes.sort(key=group_func)
host_counts = dict()
node_counts = dict()
lic_counts = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
first_node = nodes_on_machine[0]
agent_type = first_node.group_type
app_name = first_node.app.name
env = 'Production'
if 'Devl' in app_name:
env = 'Development'
if 'Qual' in app_name:
env = 'Qual'
if 'Cert' in app_name:
env = 'Cert'
all_same = all(x.group_type == agent_type for x in nodes_on_machine)
# assert all_same, first_node
all_same = all(x.app.name == app_name for x in nodes_on_machine)
# assert all_same, first_node
license_count = 1
if 'Java' in agent_type:
license_count = len(nodes_on_machine)
incr(lic_counts, env, license_count)
incr(host_counts, env, 1)
incr(node_counts, env, len(nodes_on_machine))
# Print the results.
tot_nodes, tot_hosts, tot_licenses = (0, 0, 0)
header_fmt = '%-30s %-15s %-15s %s'
data_fmt = '%-30s %15d %15d %15d'
print()
print('License usage report for ', args.url)
print('Generated at: ', datetime.now())
print()
print(header_fmt % ('Environment', 'Node Count', 'Host Count', 'License Count'))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
for env in sorted(node_counts.keys()):
node_count = node_counts.get(env, 0)
host_count = host_counts.get(env, 0)
lic_count = lic_counts.get(env, 0)
tot_nodes += node_count
tot_hosts += host_count
tot_licenses += lic_count
print(data_fmt % (env, node_count, host_count, lic_count))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
print(data_fmt % ('TOTAL', tot_nodes, tot_hosts, tot_licenses))
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/license_count_by_env.py
|
license_count_by_env.py
|
from __future__ import print_function
from datetime import datetime
import itertools
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
def incr(d, name, amt=1):
d[name] = d.get(name, 0) + amt
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
if node.has_machine_agent or node.has_app_agent:
if node.has_app_agent:
if 'PHP' in node.type:
node.group_type = 'PHP App Agent'
                elif 'IIS' in node.type:
node.group_type = '.NET App Agent'
else:
node.group_type = 'Java App Agent'
else:
node.group_type = 'Machine Agent only'
node.app = app
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: x.machine_id
nodes.sort(key=group_func)
host_counts = dict()
node_counts = dict()
lic_counts = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
first_node = nodes_on_machine[0]
agent_type = first_node.group_type
app_name = first_node.app.name
app_name = app_name.replace('Prod', '').replace('Qual', '').replace('Cert', '').replace('Devl', '')
all_same = all(x.group_type == agent_type for x in nodes_on_machine)
# assert all_same, first_node
all_same = all(x.app.name == app_name for x in nodes_on_machine)
# assert all_same, first_node
license_count = 1
if 'Java' in agent_type:
license_count = len(nodes_on_machine)
incr(lic_counts, app_name, license_count)
incr(host_counts, app_name, 1)
incr(node_counts, app_name, len(nodes_on_machine))
# Print the results.
tot_nodes, tot_hosts, tot_licenses = (0, 0, 0)
header_fmt = '%-30s\t%-15s\t%-15s\t%s'
data_fmt = '%-30s\t%15d\t%15d\t%15d'
print()
print('License usage report for ', args.url)
print('Generated at: ', datetime.now())
print()
print(header_fmt % ('App Name', 'Node Count', 'Host Count', 'License Count'))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
for app_name in sorted(node_counts.keys()):
node_count = node_counts.get(app_name, 0)
host_count = host_counts.get(app_name, 0)
lic_count = lic_counts.get(app_name, 0)
tot_nodes += node_count
tot_hosts += host_count
tot_licenses += lic_count
print(data_fmt % (app_name, node_count, host_count, lic_count))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
print(data_fmt % ('TOTAL', tot_nodes, tot_hosts, tot_licenses))
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/license_count_by_appgroup.py
|
license_count_by_appgroup.py
|
from __future__ import print_function
from datetime import datetime
import itertools
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
def incr(d, name, amt=1):
d[name] = d.get(name, 0) + amt
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
# node_type = node.type
# print node.id, node.machine_id, node.machine_name, node.type
# print node.type, node.os_type, node.app_agent_version
if node.has_machine_agent or node.has_app_agent:
if node.has_app_agent:
if 'PHP' in node.type:
node.group_type = 'PHP App Agent'
                elif 'IIS' in node.type:
node.group_type = '.NET App Agent'
else:
node.group_type = 'Java App Agent'
else:
node.group_type = 'Machine Agent only'
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: x.machine_id
nodes.sort(key=group_func)
host_counts = dict()
node_counts = dict()
lic_counts = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
first_node = nodes_on_machine[0]
agent_type = first_node.group_type
# types = [x.group_type for x in nodes_on_machine]
# all_same = all(x.group_type == agent_type for x in nodes_on_machine)
# print all_same, types
# assert all_same, first_node
license_count = 1
if 'Java' in agent_type:
license_count = len(nodes_on_machine)
incr(lic_counts, agent_type, license_count)
incr(host_counts, agent_type, 1)
incr(node_counts, agent_type, len(nodes_on_machine))
# if '.NET' in agent_type:
# node_names = [x.name for x in nodes_on_machine]
# print 'Host:', first_node.machine_name, '\n\t', '\n\t'.join(node_names)
# Print the results.
tot_nodes, tot_hosts, tot_licenses = (0, 0, 0)
header_fmt = '%-30s %-15s %-15s %s'
data_fmt = '%-30s %15d %15d %15d'
print()
print('License usage report for ' + args.url)
print('Generated at: ' + str(datetime.now()))
print()
print(header_fmt % ('Node Type', 'Node Count', 'Host Count', 'License Count'))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
for node_type in ('Java App Agent', '.NET App Agent', 'PHP App Agent', 'Machine Agent only'):
node_count = node_counts.get(node_type, 0)
host_count = host_counts.get(node_type, 0)
lic_count = lic_counts.get(node_type, 0)
tot_nodes += node_count
tot_hosts += host_count
tot_licenses += lic_count
print(data_fmt % (node_type, node_count, host_count, lic_count))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
print(data_fmt % ('TOTAL', tot_nodes, tot_hosts, tot_licenses))
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/license_count.py
|
license_count.py
|
from __future__ import print_function
from string import Template
from datetime import datetime, timedelta
from time import mktime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from MySQLdb.cursors import DictCursor
from jinja2 import Environment, FileSystemLoader
import argparse
import smtplib
import sys
import MySQLdb
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
def parse_argv():
parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [options]',
description='Generates an HTML audit report by querying the database '
'on an AppDynamics controller.')
parser.add_argument('-h', '--host', dest='host',
default='localhost',
help='MySQL host name (default: %(default)s)')
parser.add_argument('-P', '--port', dest='port',
type=int, default=3306,
help='MySQL server port (default: %(default)d)')
parser.add_argument('-u', '--username', dest='username',
default='root',
help='MySQL user name (default: %(default)s)')
parser.add_argument('-p', '--password', dest='password',
default='singcontroller',
help='MySQL password (default: %(default)s)')
parser.add_argument('-d', '--database', dest='database',
default='controller',
help='MySQL database name (default: %(default)s)')
parser.add_argument('-a', '--account', dest='account',
default='customer1',
help='Controller account name (default: %(default)s)')
parser.add_argument('-n', '--days', dest='days',
type=int, default=1,
help='Number of days to report prior to today (default: %(default)d)')
parser.add_argument('-i', '--ignore-user', dest='ignore_users',
action='append',
default=['system', 'singularity-agent', 'ldapsync'],
help='Users to filter from audit log (default: %(default)s)')
parser.add_argument('-L', '--ignore-logins', dest='ignore_logins', action='store_true',
help='Ignore successful logins')
    parser.add_argument('-o', '--output', dest='outfile',
help='Send HTML output to a file instead of SMTP server')
parser.add_argument('-f', '--from', dest='sender', default='[email protected]',
help='From address in email message (default: %(default)s)')
parser.add_argument('-t', '--to', dest='to',
action='append',
default=['[email protected]'],
help='To address in email message (default: %(default)s)')
parser.add_argument('-s', '--subject', dest='subject', default='Controller Audit Report',
help='Subject of email message (default: "%(default)s")')
parser.add_argument('--smtp-server', dest='smtphost', default='localhost',
help='SMTP server to use (default: %(default)s)')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Enable verbose output')
parser.add_argument('--help', action='help',
help='Display this help screen')
return parser.parse_args()
def from_ts(ms):
return datetime.fromtimestamp(ms / 1000)
def to_ts(dt):
return int(mktime(dt.timetuple())) * 1000
ACTIONS = {
'APP_ALERTS_CONFIGURATION': 'View Alerts',
'APP_ANALYZE_HOME': 'Analyze',
'APP_AUTOMATION_HOME': 'Automation',
'APP_BACKEND_DASHBOARD|OVERVIEW_TAB': 'Backend Dashboard',
'APP_BACKEND_LIST': 'Backends List',
'APP_BASELINES': 'Baselines',
'APP_BT_DETAIL': 'BT Dashboard',
'APP_BT_DETAIL|OVERVIEW_TAB': 'BT Dashboard',
'APP_BT_LIST': 'BT List',
'APP_BT_LIST|OVERVIEW_TAB': 'BT List',
'APP_COMPARE_METRICS': 'Correlation Analysis',
'APP_COMPONENT_MANAGER': 'Tier Dashboard',
'APP_COMPONENT_MANAGER|OVERVIEW_TAB': 'Tier Dashboard',
'APP_CONFIGURATION': 'Configure',
'APP_CONFIGURATION_ACTIONS': 'Actions',
'APP_CONFIGURATION_HOME': 'Configure',
'APP_CPU_VS_LOAD': 'Scalability Analysis',
'APP_DASHBOARD': 'Application Dashboard',
'APP_DASHBOARD|OVERVIEW_TAB': 'Application Dashboard',
'APP_DATABASE_LIST': 'Databases',
    'APP_ENVIRONMENT_PROPERTIES': 'Environment Properties',
    'APP_ERRORS': 'Errors',
'APP_ERRORS|OVERVIEW_TAB': 'Errors',
'APP_EVENTSTREAM_LIST': 'Events',
'APP_IIS_APP_POOLS': 'IIS App Pools',
'APP_INCIDENT_LIST': 'Policy Violations',
'APP_INFO_POINT_LIST': 'Information Points',
'APP_INFRA_MESSAGE_SERVERS': 'Message Servers',
'APP_INFRA_OTHER_SERVERS': 'Other Backends',
'APP_INFRASTRUCTURE': 'Servers',
'APP_NODE_MANAGER': 'Node Dashboard',
'APP_NODE_MANAGER|OVERVIEW_TAB': 'Node Dashboard',
'APP_POLICY_LIST': 'Policies',
'APP_RELEASE_ANALYSIS': 'Compare Releases',
'APP_REPORTING': 'Reports',
'APP_REQUEST_LIST': 'Requests',
'APP_RESPONSE_TIME_VS_LOAD': 'Scalability Analysis',
'APP_SCHEDULE_LIST': 'Schedules',
'APP_SERVER_HOME': 'Home',
'APP_SLOW_RESPONSE_TIMES': 'Slow Response times',
'APP_TASK_LIST': 'Tasks',
'APP_THRESHOLD_LIST': 'Thresholds',
'APP_TROUBLESHOOT_HOME': 'Troubleshooting',
'APP_WORKFLOW_EXECUTION_LIST': 'Workflow Executions',
'APP_WORKFLOW_LIST': 'Workflows',
'FLOW_ICON_MOVED': 'Flow Map Changed',
'LOGIN': 'Login Success',
'LOGIN_FAILED': 'Login Failure',
'OBJECT_CREATED': 'Object Created',
'OBJECT_DELETED': 'Object Deleted',
'OBJECT_UPDATED': 'Object Changed',
'USER_PASSWORD_CHANGED': 'Password Changed'
}
AUDIT_TABLES = {
'AGENT_CONFIGURATION': {'select_expr': "CONCAT(t.agent_type, ' ', t.entity_type)",
'display_name': 'Agent Configuration'},
'APPLICATION': {'display_name': 'Application',
'app_name_expr': 'name',
'app_id_expr': 'id'},
'APPLICATION_COMPONENT_NODE': {'display_name': 'Node',
'app_name_expr': 'NULL',
'app_id_expr': 'NULL'},
'APPLICATION_COMPONENT': {'display_name': 'Tier'},
'APPLICATION_DIAGNOSTIC_DATA': {},
'BACKEND_DISCOVERY_CONFIG': {'display_name': 'Backend Detection Config'},
'BUSINESS_TRANSACTION': {'join_to': 'abusiness_transaction',
'display_name': 'Business Transaction'},
'BUSINESS_TRANSACTION_GROUP': {'join_to': 'abusiness_transaction',
'display_name': 'Business Transaction Group'},
'CALL_GRAPH_CONFIGURATION': {'select_expr': "'CALL_GRAPH_CONFIGURATION'",
'display_name': 'Call Graph Config'},
'CUSTOM_EXIT_POINT_DEFINITION': {'display_name': 'Custom Backend Config'},
'CUSTOM_MATCH_POINT_DEFINITION': {'select_expr': "CONCAT(t.name, ' [', t.entry_point_type, ']')",
'display_name': 'Custom Entry Point'},
'DASHBOARD': {'display_name': 'Custom Dashboard'},
'DOT_NET_ERROR_CONFIGURATION': {'select_expr': "'DOT_NET_ERROR_CONFIGURATION'",
'join_to': 'dotnet_error_configuration',
'display_name': 'Error Detection Config [.NET]'},
'ERROR_CONFIGURATION': {'select_expr': "'ERROR_CONFIGURATION'",
'display_name': 'Error Detection Config'},
'EUM_CONFIGURATION': {'select_expr': "'EUM_CONFIGURATION'",
'display_name': 'EUM Config'},
# 'GLOBAL_CONFIGURATION': ('name', 'GLOBAL_CONFIGURATION'),
'HTTP_REQUEST_DATA_GATHERER_CONFIG': {'join_to': 'adata_gatherer_config',
'display_name': 'HTTP Data Collector Config'},
'JMX_CONFIG': {'join_to': 'jmx_rule'},
'MEMORY_CONFIGURATION': {'select_expr': "'MEMORY_CONFIGURATION'",
'display_name': 'Memory Monitoring Config'},
'POJO_DATA_GATHERER_CONFIG': {'join_to': 'adata_gatherer_config',
'display_name': 'Custom Method Collector Config'},
'POLICY': {'display_name': 'Policy'},
# 'RULE': ('name', 'RULE'),
'SQL_DATA_GATHERER_CONFIG': {'join_to': 'adata_gatherer_config',
'display_name': 'SQL Data Collector Config'},
'TRANSACTION_MATCH_POINT_CONFIG': {'display_name': 'Transaction Detection Config'},
'USER': {'display_name': 'User'},
}
ENTITY_TYPES = {'APPLICATION': {'display_name': 'Application',
'app_name_expr': 't.name',
'app_id_expr': 't.id'},
'APPLICATION_COMPONENT': {'display_name': 'Tier'},
'APPLICATION_COMPONENT_NODE': {'display_name': 'Node'}}
def connect(args):
conn = MySQLdb.connect(args.host, args.username, args.password, args.database, args.port,
cursorclass=DictCursor)
cur = conn.cursor()
return conn, cur
def create_temp_table(cur):
try:
sql = """
CREATE TEMPORARY TABLE audit_report (
ts_ms bigint,
account_name varchar(100),
account_id int,
user_name varchar(100),
user_security_provider_type varchar(25),
user_id int,
object_name varchar(100),
object_id int,
application_name varchar(100),
application_id int,
action varchar(100),
execution_time_ms bigint,
object_desc varchar(1000),
INDEX ts_ms (ts_ms)) ENGINE=memory DEFAULT CHARSET=utf8;
"""
cur.execute(sql)
except:
print("*** ERROR creating temporary table", file=sys.stderr)
raise
def insert_object_crud(cur, params):
try:
for table_name, table_data in AUDIT_TABLES.items():
params.update({
'table_name': table_name,
'id_field': table_data.get('id_field', 'id'),
'select_expr': table_data.get('select_expr', 'name'),
'app_name_expr': table_data.get('app_name_expr', 'NULL'),
'app_id_expr': table_data.get('app_id_expr', 'NULL'),
'join_to': table_data.get('join_to', table_name),
})
sql = """
INSERT INTO audit_report
(SELECT ca.ts_ms, ca.account_name, ca.account_id, ca.user_name, ca.user_security_provider_type,
ca.user_id, ca.object_name, ca.object_id, $app_name_expr, $app_id_expr,
ca.action, ca.execution_time_ms,
$select_expr
FROM controller_audit ca
JOIN $join_to t ON ($id_field = ca.object_id)
WHERE LOWER(ca.account_name) = LOWER('$acct')
AND LOWER(ca.object_name) = '$table_name'
AND ca.action LIKE 'OBJECT%'
AND ca.object_name NOT IN ('AGENT_CONFIGURATION')
AND ca.user_name NOT IN ($users_to_ignore)
AND ca.ts_ms BETWEEN $start_ts AND $end_ts
ORDER BY ca.ts_ms)
"""
sql = Template(sql).substitute(params)
cur.execute(sql)
except:
print("*** ERROR EXECUTING SQL: ", sql, file=sys.stderr)
raise
def insert_agent_configuration_crud(cur, params):
try:
for entity_type, entity_data in ENTITY_TYPES.items():
params.update({
'entity_type': entity_type,
'app_name_expr': entity_data.get('app_name_expr', 'NULL'),
'app_id_expr': entity_data.get('app_id_expr', 'NULL'),
'display_name': entity_data.get('display_name', entity_type)
})
sql = """
INSERT INTO audit_report
(SELECT ca.ts_ms, ca.account_name, ca.account_id, ca.user_name, ca.user_security_provider_type,
ca.user_id, ca.object_name, ca.object_id, $app_name_expr, $app_id_expr, ca.action, ca.execution_time_ms,
CONCAT('$display_name', ' ', t.name) AS display_name
FROM controller_audit ca
JOIN agent_configuration ac ON (ac.id = ca.object_id)
JOIN $entity_type t ON (t.id = ac.entity_id)
WHERE LOWER(ca.account_name) = LOWER('$acct')
AND LOWER(ca.object_name) = 'AGENT_CONFIGURATION'
AND ca.action LIKE 'OBJECT%%'
AND ca.user_name NOT IN ($users_to_ignore)
AND ca.ts_ms BETWEEN $start_ts AND $end_ts
ORDER BY ca.ts_ms)
"""
sql = Template(sql).substitute(params)
cur.execute(sql)
except:
print("*** ERROR EXECUTING SQL: ", sql, file=sys.stderr)
raise
def insert_logins(cur, params):
try:
sql = """
INSERT INTO audit_report
(SELECT ca.ts_ms, ca.account_name, ca.account_id, ca.user_name, ca.user_security_provider_type,
ca.user_id, ca.object_name, ca.object_id, NULL, NULL, ca.action, ca.execution_time_ms,
NULL AS display_name
FROM controller_audit ca
WHERE LOWER(ca.account_name) = LOWER('$acct')
AND ca.action IN ($login_types)
AND ca.user_name NOT IN ($users_to_ignore)
AND ca.ts_ms BETWEEN $start_ts AND $end_ts
ORDER BY ca.ts_ms)
"""
sql = Template(sql).substitute(params)
cur.execute(sql)
except:
print("*** ERROR EXECUTING SQL: ", sql, file=sys.stderr)
raise
def select_results(cur):
sql = """
SELECT ts_ms, account_name, account_id, user_name, user_security_provider_type,
user_id, object_name, object_id, application_id, action, execution_time_ms, object_desc
FROM audit_report
ORDER BY ts_ms
"""
cur.execute(sql)
def generate_output(args, params):
env = Environment(loader=FileSystemLoader(['templates', 'appd/reports/templates', '.']))
template = env.get_template('audit.jinja.html')
return template.render(params)
def populate_params(args):
end_time = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start_time = end_time + timedelta(-args.days)
login_types = ['LOGIN_FAILED']
if not args.ignore_logins:
login_types.append('LOGIN')
return {
'end_time': end_time,
'start_time': start_time,
'start_ts': to_ts(start_time),
'end_ts': to_ts(end_time),
'acct': args.account,
'users_to_ignore': ', '.join(["'" + x + "'" for x in args.ignore_users]),
'login_types': ', '.join(["'" + x + "'" for x in login_types])
}
def send_html_email(args, html):
msg = MIMEMultipart('alternative')
msg['Subject'] = args.subject
msg['From'] = args.sender
msg['To'] = ', '.join(args.to)
part1 = MIMEText('Please use an HTML email client to view this message.', 'plain')
part2 = MIMEText(html, 'html', 'utf-8')
msg.attach(part1)
msg.attach(part2)
s = smtplib.SMTP(args.smtphost)
if args.verbose:
s.set_debuglevel(1)
s.sendmail(args.sender, args.to, msg.as_string())
s.close()
if __name__ == '__main__':
args = parse_argv()
params = populate_params(args)
(conn, cur) = connect(args)
create_temp_table(cur)
insert_object_crud(cur, params)
insert_agent_configuration_crud(cur, params)
insert_logins(cur, params)
select_results(cur)
start_time = params['start_time']
end_time = params['end_time']
html = generate_output(args, locals())
conn.close()
if args.outfile:
with open(args.outfile, 'w') as f:
f.write(html)
else:
send_html_email(args, html)
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/audit.py
|
audit.py
|
from __future__ import print_function
from datetime import datetime
import itertools
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
def incr(d, name, amt=1):
d[name] = d.get(name, 0) + amt
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
nodes = []
for app in c.get_applications():
for node in c.get_nodes(app.id):
if node.has_machine_agent or node.has_app_agent:
if node.has_app_agent:
if 'PHP' in node.type:
node.group_type = 'PHP App Agent'
                elif 'IIS' in node.type:
node.group_type = '.NET App Agent'
else:
node.group_type = 'Java App Agent'
else:
node.group_type = 'Machine Agent only'
node.app = app
nodes.append(node)
# Sort and group the nodes by machine_id.
group_func = lambda x: x.machine_id
nodes.sort(key=group_func)
host_counts = dict()
node_counts = dict()
lic_counts = dict()
for machine_id, nodes_on_machine_iter in itertools.groupby(nodes, key=group_func):
nodes_on_machine = list(nodes_on_machine_iter)
first_node = nodes_on_machine[0]
agent_type = first_node.group_type
app_name = first_node.app.name
all_same = all(x.group_type == agent_type for x in nodes_on_machine)
# assert all_same, first_node
all_same = all(x.app.name == app_name for x in nodes_on_machine)
# assert all_same, first_node
license_count = 1
if 'Java' in agent_type:
license_count = len(nodes_on_machine)
incr(lic_counts, app_name, license_count)
incr(host_counts, app_name, 1)
incr(node_counts, app_name, len(nodes_on_machine))
# Print the results.
tot_nodes, tot_hosts, tot_licenses = (0, 0, 0)
header_fmt = '%-30s %-15s %-15s %s'
data_fmt = '%-30s %15d %15d %15d'
print()
print('License usage report for ', args.url)
print('Generated at: ', datetime.now())
print()
print(header_fmt % ('App Name', 'Node Count', 'Host Count', 'License Count'))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
for app_name in sorted(node_counts.keys()):
    node_count = node_counts.get(app_name, 0)
    host_count = host_counts.get(app_name, 0)
    lic_count = lic_counts.get(app_name, 0)
    tot_nodes += node_count
    tot_hosts += host_count
    tot_licenses += lic_count
    print(data_fmt % (app_name, node_count, host_count, lic_count))
print(header_fmt % ('=' * 30, '=' * 15, '=' * 15, '=' * 15))
print(data_fmt % ('TOTAL', tot_nodes, tot_hosts, tot_licenses))
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/license_count_by_app.py
|
license_count_by_app.py
|
from __future__ import print_function
from collections import defaultdict
from datetime import datetime
from time import mktime
from lxml.builder import ElementMaker
from lxml import etree
import tzlocal
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = 'Todd Radel'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
# The report will generate data for the 24-hour period before midnight of the current day. To change the
# reporting period, adjust these variables.
time_in_mins = 1440
end_time = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
end_epoch = int(mktime(end_time.timetuple())) * 1000
# Helper functions
def now_rfc3339():
return datetime.now(tzlocal.get_localzone()).isoformat('T')
# Parse command line arguments and create AD client:
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
# Get the list of configured apps, and get backend metrics for each one:
METRIC_MAP = {'Average Block Time (ms)': 'abt',
'Average CPU Used (ms)': 'cpu',
'Average Request Size': 'req_size',
'Average Response Time (ms)': 'art',
'Average Wait Time (ms)': 'wait_time',
'Calls per Minute': 'cpm',
'End User Average Response Time (ms)': 'eum_art',
'End User Network Average Response Time (ms)': 'eum_net',
'End User Page Render Average Response Time (ms)': 'eum_render',
'Errors per Minute': 'epm',
'Normal Average Response Time (ms)': 'norm_art',
'Number of Slow Calls': 'slow',
'Number of Very Slow Calls': 'veryslow',
'Stall Count': 'stalls'}
empty_row = dict([(x, 0) for x in list(METRIC_MAP.values())])
rows = defaultdict(dict)
for app in c.get_applications():
bt_list = c.get_bt_list(app.id)
for md in c.get_metrics('Business Transaction Performance|Business Transactions|*|*|*',
app.id, time_range_type='BEFORE_TIME', end_time=end_epoch,
duration_in_mins=time_in_mins, rollup=True):
# Get the last 3 components of the metric path. This should be 'tier_name|bt_name|metric_name'.
tier_name, bt_name, metric_name = md.path.split('|')[-3:]
tier_bts = bt_list.by_tier_and_name(bt_name, tier_name)
if tier_bts:
bt = tier_bts[0]
if len(md.values) > 0 and metric_name in METRIC_MAP:
key = (tier_name, bt_name)
rows.setdefault(key, empty_row.copy()).update({'app_id': app.id,
'app_name': app.name,
'bt_id': bt.id,
'bt_name': bt.name,
'tier_name': bt.tier_name,
'type': bt.type,
METRIC_MAP[metric_name]: md.values[0].value})
# Generate the report.
XSI = 'http://www.w3.org/2001/XMLSchema-instance'
E = ElementMaker(nsmap={'xsi': XSI})
root = E.BusinessTransactions(Controller=c.base_url, GenerationTime=now_rfc3339())
root.set('{%s}noNamespaceSchemaLocation' % XSI, 'bt_metrics.xsd')
for k, v in sorted(rows.items()):
v['calls'] = v['cpm'] * time_in_mins
v['errors'] = v['epm'] * time_in_mins
v['error_pct'] = round(float(v['errors']) / float(v['calls']) * 100.0, 1) if v['calls'] > 0 else 0
root.append(E.BusinessTransaction(
E.ApplicationName(v['app_name']),
E.BusinessTransactionName(v['bt_name']),
E.TierName(v['tier_name']),
E.AverageResponseTime(str(v['art'])),
E.CallsPerMinute(str(v['cpm'])),
E.TotalCalls(str(v['calls'])),
E.TotalErrors(str(v['errors'])),
E.ErrorsPerMinute(str(v['epm'])),
E.ErrorPercentage(str(v['error_pct'])),
E.SlowCalls(str(v['slow'])),
E.VerySlowCalls(str(v['veryslow'])),
E.Stalls(str(v['stalls'])),
))
# Print the report to stdout.
print(etree.ProcessingInstruction('xml', 'version="1.0" encoding="UTF-8"'))
print(etree.tostring(root, pretty_print=True, encoding='UTF-8'))
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/examples/bt_metrics.py
|
bt_metrics.py
|
from __future__ import print_function
import sys
import json
import requests
from datetime import datetime
from urllib import parse
from appd.model.account import *
from appd.model.application import *
from appd.model.config_variable import *
from appd.model.license_module import *
from appd.model.hourly_license_usage import *
from appd.model.license_usage import *
from appd.model.tier import *
from appd.model.backend import *
from appd.model.metric_treenode import *
from appd.model.business_transaction import *
from appd.model.policy_violation import *
from appd.model.snapshot import *
from appd.model.metric_data import *
from appd.model.node import *
from appd.model.set_controller_url import *
from appd.model.event import *
from appd.model.action_suppressions import *
from appd.model.audit_history import *
class AppDynamicsClient(object):
"""
Main class that wraps around the REST API to make it easier to send requests
and parse responses.
"""
TIME_RANGE_TYPES = ('BEFORE_NOW', 'BEFORE_TIME', 'AFTER_TIME', 'BETWEEN_TIMES')
DEEP_DIVE_POLICIES = ('SLA_FAILURE', 'TIME_SAMPLING', 'ERROR_SAMPLING', 'OCCURRENCE_SAMPLING',
'ON_DEMAND', 'HOTSPOT', 'HOTSPOT_LEARN', 'APPLICATION_STARTUP', 'DIAGNOSTIC_SESSION',
'SLOW_DIAGNOSTIC_SESSION', 'ERROR_DIAGNOSTIC_SESSION', 'POLICY_FAILURE_DIAGNOSTIC_SESSION',
'INFLIGHT_SLOW_SESSION')
USER_EXPERIENCES = ('NORMAL', 'SLOW', 'VERY_SLOW', 'STALL', 'BUSINESS_ERROR')
COLLECTOR_TYPES = ('ERROR_IDS', 'STACK_TRACES', 'ERROR_DETAIL', 'HTTP_PARAMETER', 'BUSINESS_DATA',
'COOKIE', 'HTTP_HEADER', 'SESSION_KEY', 'RESPONSE_HEADER', 'LOG_MESSAGE',
'TRANSACTION_PROPERTY', 'TRANSACTION_EVENT', 'DOTNET_PROPS', 'DOTNET_SET')
SNAPSHOT_REQUEST_PARAMS = ('guids', 'archived', 'deep-dive-policy', 'application-component-ids',
'application-component-node-ids', 'business-transaction-ids', 'user-experience',
'first-in-chain', 'need-props', 'need-exit-calls', 'execution-time-in-millis',
'session-id', 'user-principal-id', 'error-ids', 'error-occurred',
'bad-request', 'diagnostic-snapshot', 'diagnostic-session-guid',
'starting-request-id', 'ending-request-id',
'data-collector-name', 'data-collector-type', 'data-collector-value')
SNAPSHOT_REQUEST_LISTS = ('business-transaction-ids', 'user-experience', 'error-ids', 'guids', 'deep-dive-policy',
'application-component-ids', 'application-component-node-ids', 'diagnostic-session-guid')
CPM = CALLS_PER_MINUTE = 'Calls per Minute'
ART = AVERAGE_RESPONSE_TIME = 'Average Response Time (ms)'
NORMAL_ART = 'Normal Average Response Time (ms)'
SLOW_CALLS = 'Number of Slow Calls'
VERY_SLOW_CALLS = 'Number of Very Slow Calls'
EPM = ERRORS_PER_MINUTE = 'Errors per Minute'
EXCEPTIONS_PER_MINUTE = 'Exceptions per Minute'
STALLS = 'Stall Count'
def __init__(self, base_url='http://localhost:8090', username='user1', password='welcome',
account='customer1', debug=False, verify=True):
"""
Creates a new instance of the client.
:param base_url: URL of your controller.
:type base_url: str.
:param username: User name to authenticate to the controller with.
:type username: str.
:param password: Password for authentication to the controller.
:type password: str.
:param account: Account name for multi-tenant controllers. For single-tenant controllers, use
the default value of "customer1".
:param debug: Set to :const:`True` to print extra debugging information to :const:`sys.stdout`.
:type debug: bool.
"""
self._username, self._password, self._account, self._app_id, self._session = '', '', '', None, None
self._base_url = ''
(self.base_url, self.username, self.password, self.account, self.debug, self.verify) = (base_url, username, password,
account, debug, verify)
@property
def base_url(self):
return self._base_url
@base_url.setter
def base_url(self, new_url):
self._base_url = new_url
if '://' not in self._base_url:
self._base_url = 'http://' + self._base_url
while self._base_url.endswith('/'):
self._base_url = self._base_url[:-1]
def __make_auth(self):
self._auth = (self._username + '@' + self._account, self._password)
@property
def username(self):
return self._username
@username.setter
def username(self, new_username):
self._username = new_username
self.__make_auth()
@property
def password(self):
return self._password
@password.setter
def password(self, new_password):
self._password = new_password
self.__make_auth()
@property
def account(self):
return self._account
@account.setter
def account(self, new_account):
self._account = new_account
self.__make_auth()
@property
def app_id(self):
return self._app_id
@app_id.setter
def app_id(self, new_app_id):
self._app_id = new_app_id
def _get_session(self):
if not self._session:
from requests.sessions import Session
self._session = Session()
self._session.verify = self.verify
return self._session
def _request(self, **request_args):
s = self._get_session()
req = requests.Request(**request_args)
prepped = s.prepare_request(req)
# Merge environment settings into session
settings = s.merge_environment_settings(prepped.url, {}, None, None, None)
return s.send(prepped, **settings)
def upload(self, path, data, filename='default'):
"""
Workaround a bug between requests file upload boundary format
and the expected format at the server
"""
import pycurl
import base64
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
c = pycurl.Curl()
b = BytesIO()
c.setopt(c.SSL_VERIFYPEER, 1 if self.verify else 0)
c.setopt(c.SSL_VERIFYHOST, 2 if self.verify else 0)
c.setopt(c.WRITEDATA, b)
c.setopt(c.URL, url)
auth = base64.b64encode(('%s:%s' % self._auth).encode('utf-8')).decode('ascii')
c.setopt(c.HTTPHEADER, [
'Authorization: Basic {0}'.format(auth)
])
c.setopt(c.HTTPPOST, [
('fileupload', (
c.FORM_BUFFER, filename,
c.FORM_BUFFERPTR, data,
)),
])
c.perform()
c.close()
return b.getvalue()
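# Usage sketch (values are placeholders): import_custom_dashboard(), defined
# later in this class, is a thin wrapper over this method, so a round trip
# looks like:
#   payload = client.export_custom_dashboard(42)
#   client.upload('/controller/CustomDashboardImportExportServlet', payload)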
def request(self, path, params=None, method='GET', use_json=True, query=True, headers=None):
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
params = params or {}
if use_json and method == 'GET':
params['output'] = 'JSON'
for k in list(params.keys()):
if params[k] is None:
del params[k]
if self.debug:
print('Retrieving ' + url, self._auth, params)
if method == 'GET' or query:
r = self._request(method=method, url=url, auth=self._auth, params=params, headers=headers)
else:
if not headers:
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = self._request(method=method, url=url, auth=self._auth, data=json.dumps(params), headers=headers)
if r.status_code != requests.codes.ok:
print(url, file=sys.stderr)
r.raise_for_status()
return r.json() if use_json else r.text
def _app_path(self, app_id, path=None):
app_id = app_id if isinstance(app_id, (int, str)) else self._app_id
if not app_id:
raise ValueError('application id is required')
path = '/controller/rest/applications/%s' % parse.quote(str(app_id)) + (path or '')
return path
def get_metric_tree(self, app_id=None, metric_path=None, recurse=False):
"""
Retrieves a list of available metrics.
:param int app_id: Application ID to retrieve metrics for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str metric_path: Point in the metric tree at which you want to retrieve the available metrics.
If :const:`None`, start at the root.
:param bool recurse: If :const:`True`, retrieve the entire tree from :data:`metric_path` on down.
:returns: An object containing the metrics under this point in the tree.
:rtype: appd.model.MetricTreeNodes
"""
parent = None
if metric_path is not None:
parent = MetricTreeNode(parent=None, node_name=metric_path, node_type='folder')
return self._get_metric_tree(app_id, parent=parent, recurse=recurse)
def _get_metric_tree(self, app_id=None, parent=None, recurse=False):
params = {}
if parent is not None:
params['metric-path'] = parent.path
path = self._app_path(app_id, '/metrics')
nodes = MetricTreeNodes.from_json(self.request(path, params), parent)
if recurse:
for node in nodes:
if node.type == 'folder':
self._get_metric_tree(app_id, parent=node, recurse=True)
return nodes
# Top-level requests
def _top_request(self, cls, path, params=None, method='GET', query=True):
return cls.from_json(self.request('/controller/rest' + path, params, method, query=query))
def set_controller_url(self, controllerURL):
"""
Sets the base url for all email notification links
:param str controllerURL: Base URL to use for email notification links
:returns: Success message
"""
param = {'controllerURL': controllerURL}
return self._top_request(SetControllerUrlResponse,
'/accounts/%s/update-controller-url' % self.account, param, 'POST', query=False)
def get_audit_history(self, start_time, end_time, time_zone_id=None):
"""
Gathers audit history for up to a 24h time period
:param str start_time: Start time in the format: "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
:param str end_time: End time in the format: "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
:param str time_zone_id: Optional, time zone in tz format
:return: A :class:`AuditHistory <appd.model.AuditHistory>` object, representing a collection of audits.
:rtype: appd.model.AuditHistory
"""
params = {
'startTime': start_time,
'endTime': end_time
}
if (time_zone_id):
params.update({'timeZoneId': time_zone_id})
return AuditHistory.from_json(self.request('/controller/ControllerAuditHistory',
params, 'GET', query=True, use_json=True))
def create_user(self,
user_name,
user_display_name,
user_email,
user_password,
user_roles=None):
"""
Creates a user
:param str user_name: User's name (required)
:param str user_display_name: How the user's name will show up in the UI (required)
:param str user_email: User's email (required)
:param str user_roles: Comma-separated list of roles
https://docs.appdynamics.com/display/PRO42/Roles+and+Permissions#RolesandPermissions-PredefinedRoles
:param str user_password: Required on create, not on update
:returns: Nothing on success, exception with HTTP error code on failure
"""
params = {'user-name': user_name,
'user-display-name': user_display_name,
'user-email': user_email}
if (user_roles):
params.update({'user-roles': user_roles})
if (user_password):
params.update({'user-password': user_password})
return self.request('/controller/rest/users', params, 'POST', query=True, use_json=False)
def export_health_rules(self, application_id, name=None):
"""
Exports all health rules from the given app, in XML format
:param int application_id: Application ID
:param string name: Name of a particular health rule to export,
if omitted, all health rules will be exported
:returns: XML string
"""
params = {}
if (name):
params.update({'name': name})
return self.request('/controller/healthrules/{0}'.format(application_id),
params, 'GET', query=True, use_json=False)
def import_health_rules(self, application_id, xml, overwrite=False):
"""
Imports all health rules into the given app, from XML format
:param int application_id: Application ID
:param string xml: Output of export_health_rules
:returns: Plain text string, containing success or failure messages
"""
params = {}
if (overwrite):
params.update({'overwrite': overwrite})
path = '/controller/healthrules/{0}'.format(application_id)
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('healthrules.xml', xml)}
r = self._request(method='POST', url=url, auth=self._auth, params=params, files=files)
return r.text
def export_entry_point_type(self, application_id, entry_point_type, rule_type, tier=None, rule_name=None):
"""
Exports one entry point type from the given app, in XML format
:param int application_id: Application ID
:param string entry_point_type: type of entry point, see AppDynamics docs
:param string rule_type: one of auto, custom, or exclude
:param string tier: name of the tier to export from, optional
:returns: XML string
"""
url = '/controller/transactiondetection/{0}/'.format(application_id)
if (tier):
url = url + tier + '/'
url = url + rule_type + '/'
url = url + entry_point_type
if (rule_name):
url = url + '/' + rule_name
return self.request(url, {}, 'GET', query=True, use_json=False)
def import_entry_point_type(self, application_id, entry_point_type, rule_type, xml, tier=None, rule_name=None):
"""
Imports all entry points into the given app, from XML format
:param int application_id: Application ID
:param string rule_type: one of auto, custom, or exclude
:param string xml: Output of export_entry_point_type
:param string tier: name of the tier to import into, optional
:returns: JSON string, containing success or failure messages
"""
path = '/controller/transactiondetection/{0}/'.format(application_id)
if (tier):
path = path + tier + '/'
path = path + rule_type + '/'
path = path + entry_point_type
if (rule_name):
path = path + '/' + rule_name
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('entrypoints.xml', xml)}
r = self._request(method='POST', url=url, auth=self._auth, files=files)
return r.text
def export_entry_points(self, application_id, rule_type, tier=None):
"""
Exports all entry points from the given app, in XML format
:param int application_id: Application ID
:param string rule_type: one of auto, custom, or exclude
:param string tier: name of the tier to export from, optional
:returns: XML string
"""
url = '/controller/transactiondetection/{0}/'.format(application_id)
if (tier):
url = url + tier + '/'
url = url + rule_type
return self.request(url, {}, 'GET', query=True, use_json=False)
def import_entry_points(self, application_id, rule_type, xml, tier=None):
"""
Imports all entry points into the given app, from XML format
:param int application_id: Application ID
:param string rule_type: one of auto, custom, or exclude
:param string xml: Output of export_entry_points
:param string tier: name of the tier to import into, optional
:returns: JSON string, containing success or failure messages
"""
path = '/controller/transactiondetection/{0}/'.format(application_id)
if (tier):
path = path + tier + '/'
path = path + rule_type
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('entrypoints.xml', xml)}
r = self._request(method='POST', url=url, auth=self._auth, files=files)
return r.text
def export_analytics_dynamic_service_configs(self, application_id):
"""
Exports all analytics dynamic service configs from the given app, in XML format
Only available on controllers 4.3+
:param int application_id: Application ID
:returns: XML string
"""
return self.request('/controller/analyticsdynamicservice/{0}'.format(application_id),
{}, 'GET', query=True, use_json=False)
def import_analytics_dynamic_service_configs(self, application_id, xml):
"""
Imports all analytics dynamic service configs into the given app, from XML format
Only available on controllers 4.3+
:param int application_id: Application ID
:param string xml: Output of export_analytics_dynamic_service_configs
:returns: JSON string, containing success or failure messages
"""
path = '/controller/analyticsdynamicservice/{0}'.format(application_id)
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('actions.xml', xml)}
r = self._request(method='POST', url=url, auth=self._auth, files=files)
return r.text
def export_custom_dashboard(self, dashboard_id):
"""
Exports custom dashboard with the given ID, in JSON format
:param int dashboard_id: custom dashboard ID
:returns: JSON string
"""
params = {}
if (dashboard_id):
params.update({'dashboardId': dashboard_id})
return self.request('/controller/CustomDashboardImportExportServlet', params, 'GET', query=True, use_json=False)
def import_custom_dashboard(self, json):
"""
Imports custom dashboard, from JSON format
:param string json: Output of export_custom_dashboard
:returns: JSON string, containing success or failure messages
"""
path = '/controller/CustomDashboardImportExportServlet'
return self.upload(path, json)
def export_policies(self, application_id):
"""
Exports all policies from the given app, in JSON format
:param int application_id: Application ID
:returns: JSON string
"""
return self.request('/controller/actions/{0}'.format(application_id), {}, 'GET', query=True, use_json=False)
def import_policies(self, application_id, json):
"""
Imports all policies into the given app, from JSON format
:param int application_id: Application ID
:param string json: Output of export_policies
:returns: JSON string, containing success or failure messages
"""
path = '/controller/actions/{0}'.format(application_id)
return self.upload(path, json)
def export_actions(self, application_id):
"""
Exports all actions from the given app, in JSON format
:param int application_id: Application ID
:returns: JSON string
"""
return self.request('/controller/actions/{0}'.format(application_id), {}, 'GET', query=True, use_json=False)
def import_actions(self, application_id, json):
"""
Imports all actions into the given app, from JSON format
:param int application_id: Application ID
:param string json: Output of export_actions
:returns: JSON string, containing success or failure messages
"""
path = '/controller/actions/{0}'.format(application_id)
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('actions.json', json)}
r = self._request(method='POST', url=url, auth=self._auth, files=files)
return r.text
def export_email_action_templates(self):
"""
Exports all email action templates from the controller, in JSON format
:returns: JSON string
"""
return self.request('/controller/actiontemplate/email', {}, 'GET', query=True, use_json=False)
def import_email_action_templates(self, json):
"""
Imports all email action templates into the controller, from JSON format
:param string json: Output of export_email_action_templates
:returns: JSON string, containing success or failure messages
"""
path = '/controller/actiontemplate/email'
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('actions.json', json)}
r = self._request(method='POST', url=url, auth=self._auth, files=files)
return r.text
def export_httprequest_action_templates(self):
"""
Exports all httprequest action templates from the controller, in JSON format
:returns: JSON string
"""
return self.request('/controller/actiontemplate/httprequest', {}, 'GET', query=True, use_json=False)
def import_httprequest_action_templates(self, json):
"""
Imports all httprequest action templates into the controller, from JSON format
:param string json: Output of export_httprequest_action_templates
:returns: JSON string, containing success or failure messages
"""
path = '/controller/actiontemplate/httprequest'
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
files = {'file': ('actions.json', json)}
r = self._request(method='POST', url=url, auth=self._auth, files=files)
return r.text
def get_config(self):
"""
Retrieve the controller configuration.
:returns: Configuration variables.
:rtype: appd.model.ConfigVariables
"""
return self._top_request(ConfigVariables, '/configuration')
def set_config(self, name, value):
"""
Set a controller configuration value.
:returns: Configuration variables.
:rtype: appd.model.ConfigVariables
"""
param = {'name': name, 'value': value}
return self._top_request(ConfigVariables, '/configuration', param, 'POST', query=True)
def get_applications(self):
"""
Get a list of all business applications.
:returns: List of applications visible to the user.
:rtype: appd.model.Applications
"""
return self._top_request(Applications, '/applications')
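# Example (sketch; the connection details are placeholders):
#   client = AppDynamicsClient('http://controller:8090', 'user', 'pass', 'customer1')
#   for app in client.get_applications():
#       print(app.id, app.name)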
def mark_nodes_historical(self, nodes):
"""
Mark nodes historical
:param str nodes: CSV of node IDs
:returns: Nothing on success, exception with HTTP error code on failure
"""
params = {'application-component-node-ids': nodes}
return self.request('/controller/rest/mark-nodes-historical', params, 'POST', query=True, use_json=False)
# Application-level requests
def _app_request(self, cls, path, app_id=None, params=None, method='GET', query=True, use_json=True):
path = self._app_path(app_id, path)
return cls.from_json(self.request(path, params, method=method, query=query, use_json=use_json))
def get_bt_list(self, app_id=None, excluded=False):
"""
Get the list of all registered business transactions in an application.
:param int app_id: Application ID to retrieve the BT list for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param bool excluded: If True, the function will return BT's that have been excluded in the AppDynamics
UI. If False, the function will return all BT's that have not been excluded. The default is False.
:returns: The list of registered business transactions.
:rtype: appd.model.BusinessTransactions
"""
return self._app_request(BusinessTransactions, '/business-transactions', app_id, {'exclude': excluded})
def get_backends(self, app_id=None):
"""
Get the list of all backends in an application.
:param int app_id: Application ID to retrieve backends for. If :const:`None`, the value stored in the
`app_id` property will be used.
:return: A :class:`Backends <appd.model.Backends>` object, representing a collection of backends.
:rtype: appd.model.Backends
"""
return self._app_request(Backends, '/backends', app_id)
def get_tiers(self, app_id=None):
"""
Get the list of all configured tiers in an application.
:param int app_id: Application ID to retrieve tiers for. If :const:`None`, the value stored in the
`app_id` property will be used.
:return: A :class:`Tiers <appd.model.Tiers>` object, representing a collection of tiers.
:rtype: appd.model.Tiers
"""
return self._app_request(Tiers, '/tiers', app_id)
def get_nodes(self, app_id=None, tier_id=None):
"""
Retrieves the list of nodes in the application, optionally filtered by tier.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param int tier_id: If set, retrieve only the nodes belonging to the specified tier. If :const:`None`,
retrieve all nodes in the application.
:return: A :class:`Nodes <appd.model.Nodes>` object, representing a collection of nodes.
:rtype: appd.model.Nodes
"""
path = ('/tiers/%s/nodes' % tier_id) if tier_id else '/nodes'
return self._app_request(Nodes, path, app_id)
def get_node(self, node_id, app_id=None):
"""
Retrieves details about a single node.
:param node_id: ID or name of the node to retrieve.
:param app_id: Application ID to search for the node.
:return: A single Node object.
:rtype: appd.model.Node
"""
return self._app_request(Node, '/nodes/%s' % node_id, app_id)
def _validate_time_range(self, time_range_type, duration_in_mins, start_time, end_time):
"""
Validates the combination of parameters used to specify a time range in AppDynamics.
:param str time_range_type: type of time range to search
:param int duration_in_mins: duration to search, in minutes
:param long start_time: starting time
:param long end_time: ending time
:returns: parameters to be sent to controller
:rtype: dict
"""
if time_range_type and time_range_type not in self.TIME_RANGE_TYPES:
raise ValueError('time_range_type must be one of: ' + ', '.join(self.TIME_RANGE_TYPES))
elif time_range_type == 'BEFORE_NOW' and not duration_in_mins:
raise ValueError('when using BEFORE_NOW, you must specify duration_in_mins')
elif time_range_type == 'BEFORE_TIME' and (not end_time or not duration_in_mins):
raise ValueError('when using BEFORE_TIME, you must specify duration_in_mins and end_time')
elif time_range_type == 'AFTER_TIME' and (not start_time or not duration_in_mins):
raise ValueError('when using AFTER_TIME, you must specify duration_in_mins and start_time')
elif time_range_type == 'BETWEEN_TIMES' and (not start_time or not end_time):
raise ValueError('when using BETWEEN_TIMES, you must specify start_time and end_time')
return {'time-range-type': time_range_type,
'duration-in-mins': duration_in_mins,
'start-time': start_time,
'end-time': end_time}
def get_metrics(self, metric_path, app_id=None, time_range_type='BEFORE_NOW',
duration_in_mins=15, start_time=None, end_time=None, rollup=True):
"""
Retrieves metric data.
:param str metric_path: Full metric path of the metric(s) to be retrieved. Wildcards are supported.
See :ref:`metric-paths` for details.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str time_range_type: Must be one of :const:`BEFORE_NOW`, :const:`BEFORE_TIME`,
:const:`AFTER_TIME`, or :const:`BETWEEN_TIMES`.
See :ref:`time-range-types` for a full explanation.
:param int duration_in_mins: Number of minutes before now. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_NOW`.
:param long start_time: Start time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`AFTER_TIME` or :const:`BETWEEN_TIMES`.
:param long end_time: End time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_TIME` or :const:`BETWEEN_TIMES`.
:param bool rollup: If :const:`False`, return individual data points for each time slice in
the given time range. If :const:`True`, aggregates the data and returns a single data point.
:returns: A list of metric values.
:rtype: appd.model.MetricData
"""
params = self._validate_time_range(time_range_type, duration_in_mins, start_time, end_time)
params.update({'metric-path': metric_path,
'rollup': rollup,
'output': 'JSON'})
return self._app_request(MetricData, '/metric-data', app_id, params)
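# Example (sketch): rolled-up average response time per tier over the last
# hour. The application ID and metric path wildcard are illustrative.
#   for md in client.get_metrics(
#           'Overall Application Performance|*|Average Response Time (ms)',
#           app_id=42, duration_in_mins=60, rollup=True):
#       if md.values:
#           print(md.path, md.values[0].value)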
def get_events(self, app_id=None, event_types='ALL', severities='ERROR', time_range_type='BEFORE_NOW',
duration_in_mins=15, start_time=None, end_time=None):
"""
Retrieves events.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str event_types: CSV of the events types to search on
:param str severities: CSV of the severities to search on
:param str time_range_type: Must be one of :const:`BEFORE_NOW`, :const:`BEFORE_TIME`,
:const:`AFTER_TIME`, or :const:`BETWEEN_TIMES`.
See :ref:`time-range-types` for a full explanation.
:param int duration_in_mins: Number of minutes before now. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_NOW`.
:param long start_time: Start time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`AFTER_TIME` or :const:`BETWEEN_TIMES`.
:param long end_time: End time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_TIME` or :const:`BETWEEN_TIMES`.
:returns: A list of events for the specified app, filtered by the event type and severities desired.
:rtype: appd.model.Events
"""
params = self._validate_time_range(time_range_type, duration_in_mins, start_time, end_time)
params.update({'event-types': event_types,
'severities': severities,
'output': 'JSON'})
return self._app_request(Events, '/events', app_id, params)
def create_event(self, app_id=None, summary='', comment='', eventtype='APPLICATION_CONFIG_CHANGE',
severity='INFO', customeventtype=None, node=None, tier=None, bt=None):
"""
Creates an event.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str summary: Summary of the event
:param str comment: Comment about the event
:param str eventtype: Event type
:param str severity: Severity of event, one of INFO, WARN, ERROR
:returns: Message containing the event ID if successful
"""
# TODO: support array of properties
params = {'summary': summary,
'comment': comment,
'eventtype': eventtype,
'severity': severity}
if (eventtype == "CUSTOM"):
params.update({'customeventtype': customeventtype,
'node': node,
'tier': tier,
'bt': bt})
path = self._app_path(app_id, '/events')
return self.request(path, params, method='POST', query=True, use_json=False)
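# Example (sketch): publishing a deployment marker; all values are placeholders.
#   client.create_event(app_id=42, summary='Deployed build 1.2.3',
#                       comment='via CI', eventtype='APPLICATION_DEPLOYMENT',
#                       severity='INFO')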
def exclude_bt_list(self, app_id, bts=None, exclude=True):
bts = bts or []
xml = '<business-transactions>'
for bt_id in bts:
xml = xml + '<business-transaction>'
xml = xml + '<id>{0}</id>'.format(bt_id)
xml = xml + '</business-transaction>'
xml = xml + '</business-transactions>'
params = {}
if (exclude):
params.update({'exclude': 'true'})
else:
params.update({'exclude': 'false'})
path = self._app_path(app_id, '/business-transactions')
if not path.startswith('/'):
path = '/' + path
url = self._base_url + path
headers = {'Content-type': 'text/xml', 'Accept': 'text/plain'}
r = self._request(method='POST', url=url, auth=self._auth, params=params, data=xml, headers=headers)
return r
def get_snapshots(self, app_id=None, time_range_type=None, duration_in_mins=None,
start_time=None, end_time=None, **kwargs):
"""
Finds and returns any snapshots in the given time range that match a set of criteria. You must provide
at least one condition to the search parameters in the :data:`kwargs` parameters. The list of valid
conditions can be found `here <http://appd.ws/2>`_.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str time_range_type: Must be one of :const:`BEFORE_NOW`, :const:`BEFORE_TIME`,
:const:`AFTER_TIME`, or :const:`BETWEEN_TIMES`.
See :ref:`time-range-types` for a full explanation.
:param int duration_in_mins: Number of minutes before now. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_NOW`.
:param long start_time: Start time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`AFTER_TIME` or :const:`BETWEEN_TIMES`.
:param long end_time: End time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_TIME` or :const:`BETWEEN_TIMES`.
:param kwargs: Additional key/value pairs to pass to the controller as search parameters.
:returns: A list of snapshots.
:rtype: appd.model.Snapshots
"""
params = self._validate_time_range(time_range_type, duration_in_mins, start_time, end_time)
for qs_name in self.SNAPSHOT_REQUEST_PARAMS:
arg_name = qs_name.replace('-', '_')
params[qs_name] = kwargs.get(arg_name, None)
if qs_name in self.SNAPSHOT_REQUEST_LISTS and arg_name in kwargs:
params[qs_name] = ','.join(params[qs_name])
return self._app_request(Snapshots, '/request-snapshots', app_id, params)
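# Example (sketch): dashes in SNAPSHOT_REQUEST_PARAMS become underscores in
# kwargs, and list-valued parameters are comma-joined before sending.
#   snaps = client.get_snapshots(app_id=42, time_range_type='BEFORE_NOW',
#                                duration_in_mins=30,
#                                user_experience=['SLOW', 'VERY_SLOW'],
#                                need_exit_calls=True)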
def get_policy_violations(self, app_id=None, time_range_type='BEFORE_NOW', duration_in_mins=15,
start_time=None, end_time=None):
"""
Retrieves a list of policy violations during the specified time range.
*NOTE:* Beginning with controller version 3.7, you should use :meth:`get_healthrule_violations` instead.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str time_range_type: Must be one of :const:`BEFORE_NOW`, :const:`BEFORE_TIME`,
:const:`AFTER_TIME`, or :const:`BETWEEN_TIMES`.
See :ref:`time-range-types` for a full explanation.
:param int duration_in_mins: Number of minutes before now. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_NOW`.
:param long start_time: Start time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`AFTER_TIME` or :const:`BETWEEN_TIMES`.
:param long end_time: End time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_TIME` or :const:`BETWEEN_TIMES`.
:returns: A list of policy violations.
:rtype: appd.model.PolicyViolations
"""
params = self._validate_time_range(time_range_type, duration_in_mins, start_time, end_time)
return self._app_request(PolicyViolations, '/problems/policy-violations', app_id, params)
def get_healthrule_violations(self, app_id=None, time_range_type='BEFORE_NOW', duration_in_mins=15,
start_time=None, end_time=None):
"""
Retrieves a list of health rule violations during the specified time range. Compatible with
controller version 3.7 and later.
:param int app_id: Application ID to retrieve nodes for. If :const:`None`, the value stored in the
`app_id` property will be used.
:param str time_range_type: Must be one of :const:`BEFORE_NOW`, :const:`BEFORE_TIME`,
:const:`AFTER_TIME`, or :const:`BETWEEN_TIMES`.
See :ref:`time-range-types` for a full explanation.
:param int duration_in_mins: Number of minutes before now. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_NOW`.
:param long start_time: Start time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`AFTER_TIME` or :const:`BETWEEN_TIMES`.
:param long end_time: End time, expressed in milliseconds since epoch. Only valid if the
:attr:`time_range_type` is :const:`BEFORE_TIME` or :const:`BETWEEN_TIMES`.
:returns: A list of policy violations.
:rtype: appd.model.PolicyViolations
"""
params = self._validate_time_range(time_range_type, duration_in_mins, start_time, end_time)
return self._app_request(PolicyViolations, '/problems/healthrule-violations', app_id, params)
def _v2_request(self, cls, path, params=None, method='GET'):
return cls.from_json(self.request('/api' + path, params, method))
def get_my_account(self):
"""
:rtype: Account
"""
return self._v2_request(Account, '/accounts/myaccount')
def get_account(self, account_id):
"""
:rtype: Account
"""
return self._v2_request(Account, '/accounts/{0}'.format(account_id))
def get_license_modules(self, account_id):
"""
:rtype: LicenseModules
"""
return self._v2_request(LicenseModules, '/accounts/{0}/licensemodules'.format(account_id))
def set_metric_retention(self, account_id, days, app_id=None):
path = '/controller/api/accounts/{0}/'.format(account_id)
if (app_id):
path = path + 'applications/{0}/'.format(app_id)
path = path + 'metricstaleduration/{0}'.format(days)
return self.request(path, method="POST", use_json=False)
def get_action_suppressions(self, account_id, app_id):
return self._v2_request(ActionSuppressionsResponse,
'/accounts/{0}/applications/{1}/actionsuppressions'.format(account_id, app_id))
def get_action_suppression(self, account_id, app_id, action_suppression_id):
return self._v2_request(ActionSuppression,
'/accounts/{0}/applications/{1}/actionsuppressions/{2}'.format(account_id,
app_id,
action_suppression_id))
def delete_action_suppression(self, account_id, app_id, action_suppression_id):
return self.request('/api/accounts/{0}/applications/{1}/actionsuppressions/{2}'.format(account_id,
app_id,
action_suppression_id),
method="DELETE",
use_json=False)
def create_action_suppression(self, account_id, app_id, params):
return self.request('/api/accounts/{0}/applications/{1}/actionsuppressions'.format(account_id, app_id),
method="POST",
use_json=False,
query=False,
params=params,
headers={'Content-Type': 'application/vnd.appd.cntrl+json;v=1'})
def get_license_usage(self, account_id, license_module=None, start_time=None, end_time=None):
"""
:param int account_id:
:param str license_module:
:param datetime.datetime start_time:
:param datetime.datetime end_time:
:rtype: HourlyLicenseUsages
"""
if isinstance(start_time, datetime):
start_time = start_time.isoformat()
if isinstance(end_time, datetime):
end_time = end_time.isoformat()
params = {
'licensemodule': license_module,
'showfiveminutesresolution': 'False',
'startdate': start_time,
'enddate': end_time
}
return self._v2_request(HourlyLicenseUsages, '/accounts/{0}/licensemodules/usages'.format(account_id), params)
def get_license_usage_5min(self, account_id, license_module=None, start_time=None, end_time=None):
"""
:param int account_id:
:param str license_module:
:param datetime.datetime start_time:
:param datetime.datetime end_time:
:rtype: LicenseUsages
"""
params = {
'licensemodule': license_module,
'showfiveminutesresolution': 'True',
'startdate': start_time.isoformat() if start_time else None,
'enddate': end_time.isoformat() if end_time else None
}
return self._v2_request(LicenseUsages, '/accounts/{0}/licensemodules/usages'.format(account_id), params)
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/request.py
|
request.py
|
from . import JsonObject, JsonList
from .entity_def import EntityDefinition
class Event(JsonObject):
FIELDS = {'id': '',
'summary': '',
'type': '',
'archived': '',
'deep_link_url': 'deepLinkUrl',
'event_time_ms': 'eventTime',
'is_read': 'markedAsRead',
'is_resolved': 'markedAsResolved',
'severity': '',
'sub_type': 'subType'}
"""
https://docs.appdynamics.com/display/PRO42/Events+Reference
"""
EVENT_TYPES = (
'ACTIVITY_TRACE',
'ADJUDICATION_CANCELLED',
'AGENT_ADD_BLACKLIST_REG_LIMIT_REACHED',
'AGENT_ASYNC_ADD_REG_LIMIT_REACHED',
'AGENT_CONFIGURATION_ERROR',
'AGENT_DIAGNOSTICS',
'AGENT_ERROR_ADD_REG_LIMIT_REACHED',
'AGENT_EVENT',
'AGENT_METRIC_BLACKLIST_REG_LIMIT_REACHED',
'AGENT_METRIC_REG_LIMIT_REACHED',
'AGENT_STATUS',
'ALREADY_ADJUDICATED',
'APPDYNAMICS_CONFIGURATION_WARNINGS',
'APPDYNAMICS_DATA',
'APPLICATION_CONFIG_CHANGE',
'APPLICATION_DEPLOYMENT',
'APPLICATION_ERROR',
'APP_SERVER_RESTART',
'AZURE_AUTO_SCALING',
'BACKEND_DISCOVERED',
'BT_DISCOVERED',
'CONTROLLER_AGENT_VERSION_INCOMPATIBILITY',
'CONTROLLER_ASYNC_ADD_REG_LIMIT_REACHED',
'CONTROLLER_ERROR_ADD_REG_LIMIT_REACHED',
'CONTROLLER_EVENT_UPLOAD_LIMIT_REACHED',
'CONTROLLER_METRIC_REG_LIMIT_REACHED',
'CONTROLLER_RSD_UPLOAD_LIMIT_REACHED',
'CUSTOM',
'CUSTOM_ACTION_END',
'CUSTOM_ACTION_FAILED',
'CUSTOM_ACTION_STARTED',
'DEADLOCK',
'DIAGNOSTIC_SESSION',
'DISK_SPACE',
'EMAIL_SENT',
'EUM_CLOUD_BROWSER_EVENT',
'HIGH_END_TO_END_LATENCY',
'INFO_INSTRUMENTATION_VISIBILITY',
'INTERNAL_UI_EVENT',
'LICENSE',
'LOW_HEAP_MEMORY',
'MACHINE_DISCOVERED',
'MEMORY',
'MEMORY_LEAK',
'MEMORY_LEAK_DIAGNOSTICS',
'MOBILE_CRASH_IOS_EVENT',
'MOBILE_CRASH_ANDROID_EVENT',
'NODE_DISCOVERED',
'NORMAL',
'OBJECT_CONTENT_SUMMARY',
'POLICY_CANCELED',
'POLICY_CANCELED_CRITICAL',
'POLICY_CANCELED_WARNING',
'POLICY_CLOSE',
'POLICY_CLOSE_CRITICAL',
'POLICY_CLOSE_WARNING',
'POLICY_CONTINUES',
'POLICY_CONTINUES_CRITICAL',
'POLICY_CONTINUES_WARNING',
'POLICY_DOWNGRADED',
'POLICY_OPEN',
'POLICY_OPEN_CRITICAL',
'POLICY_OPEN_WARNING',
'POLICY_UPGRADED',
'RESOURCE_POOL_LIMIT',
'RUNBOOK_DIAGNOSTIC_SESSION_END',
'RUNBOOK_DIAGNOSTIC_SESSION_FAILED',
'RUNBOOK_DIAGNOSTIC_SESSION_STARTED',
'RUN_LOCAL_SCRIPT_ACTION_END',
'RUN_LOCAL_SCRIPT_ACTION_FAILED',
'RUN_LOCAL_SCRIPT_ACTION_STARTED',
'SERVICE_ENDPOINT_DISCOVERED',
'SLOW',
'SMS_SENT',
'STALL',
'STALLED',
'SYSTEM_LOG',
'THREAD_DUMP_ACTION_END',
'THREAD_DUMP_ACTION_FAILED',
'THREAD_DUMP_ACTION_STARTED',
'TIER_DISCOVERED',
'VERY_SLOW',
'WORKFLOW_ACTION_END',
'WORKFLOW_ACTION_FAILED',
'WORKFLOW_ACTION_STARTED')
def __init__(self, event_id=0, event_type='CUSTOM', sub_type='', summary='', archived=False, event_time_ms=0,
is_read=False, is_resolved=False, severity='INFO', deep_link_url='',
triggered_entity=None, affected_entities=None):
self._event_type = None
(self.id, self.type, self.sub_type, self.summary, self.archived, self.event_time_ms, self.is_read,
self.is_resolved, self.severity, self.deep_link_url) = (event_id, event_type, sub_type, summary, archived,
event_time_ms, is_read, is_resolved, severity,
deep_link_url)
self.triggered_entity = triggered_entity or EntityDefinition()
self.affected_entities = affected_entities or []
@property
def event_type(self):
"""
:return:
"""
return self._event_type
@event_type.setter
def event_type(self, new_type):
self._list_setter('_event_type', new_type, Event.EVENT_TYPES)
class Events(JsonList):
def __init__(self, initial_list=None):
super(Events, self).__init__(Event, initial_list)
def __getitem__(self, i):
"""
:rtype: Event
"""
return self.data[i]
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/event.py
|
event.py
|
from . import JsonObject, JsonList
from appd.time import from_ts
class Snapshot(JsonObject):
FIELDS = {'id': '', 'local_id': 'localID', 'request_guid': 'requestGUID', 'summary': '',
'bt_id': 'businessTransactionId', 'app_id': 'applicationId',
'url': 'URL', 'archived': '', 'async': '', 'stall_dump': 'stallDump',
'call_chain': 'callChain', 'is_first_in_chain': 'firstInChain',
'diag_session_guid': 'diagnosticSessionGUID', 'exit_sequence': 'snapshotExitSequence',
'exit_calls_truncated': 'exitCallsDataTruncated',
'exit_calls_truncated_msg': 'exitCallsDataTruncationMessage',
'app_component_id': 'applicationComponentId', 'app_component_node_id': 'applicationComponentNodeId',
'local_start_time_ms': 'localStartTime', 'server_start_time_ms': 'serverStartTime',
'thread_id': 'threadID', 'thread_name': 'threadName',
'http_headers': 'httpHeaders', 'response_headers': 'responseHeaders',
'http_params': 'httpParameters', 'cookies': '',
'http_session_id': 'httpSessionID', 'session_keys': 'sessionKeys', 'business_data': 'businessData',
'error_ids': 'errorIDs', 'error_occurred': 'errorOccured', 'error_summary': 'errorSummary',
'error_details': 'errorDetails', 'log_messages': 'logMessages',
'bt_events': 'transactionEvents', 'bt_properties': 'transactionProperties',
'dotnet_properties': 'dotnetProperty',
'has_deep_dive_data': 'hasDeepDiveData', 'deep_dive_policy': 'deepDivePolicy',
'is_delayed_deep_dive': 'delayedDeepDive', 'delayed_deep_dive_offset': 'delayedDeepDiveOffSet',
'has_unresolved_calls': 'unresolvedCallInCallChain',
'time_taken_ms': 'timeTakenInMilliSecs', 'cpu_time_taken_ms': 'cpuTimeTakenInMilliSecs',
'warning_threshold': 'warningThreshold', 'critical_threshold': 'criticalThreshold',
'user_experience': 'userExperience'}
def __init__(self, snap_id=0, **kwargs):
for k in Snapshot.FIELDS:
self.__setattr__(k, kwargs.get(k, None))
self.id = kwargs.get('id', snap_id)
self.local_start_time_ms = self.local_start_time_ms or 0
self.server_start_time_ms = self.server_start_time_ms or 0
@property
def local_start_time(self):
return from_ts(self.local_start_time_ms)
@property
def server_start_time(self):
return from_ts(self.server_start_time_ms)
class Snapshots(JsonList):
def __init__(self, initial_list=None):
super(Snapshots, self).__init__(Snapshot, initial_list)
def __getitem__(self, i):
"""
:rtype: Snapshot
"""
return self.data[i]
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/snapshot.py
|
snapshot.py
|
from . import JsonObject, JsonList
class Node(JsonObject):
FIELDS = {'id': '', 'name': '', 'type': '', 'machine_id': 'machineId', 'machine_name': 'machineName',
'tier_id': 'tierId', 'tier_name': 'tierName', 'unique_id': 'nodeUniqueLocalId',
'os_type': 'machineOSType', 'has_app_agent': 'appAgentPresent', 'app_agent_version': 'appAgentVersion',
'has_machine_agent': 'machineAgentPresent', 'machine_agent_version': 'machineAgentVersion'}
def __init__(self, node_id=0, name='', node_type='', machine_id=0, machine_name='', os_type='',
unique_local_id='', tier_id=0, tier_name='', has_app_agent=False, app_agent_version='',
has_machine_agent=False, machine_agent_version=''):
(self.id, self.name, self.type, self.machine_id, self.machine_name, self.os_type, self.unique_local_id,
self.tier_id, self.tier_name, self.has_app_agent, self.app_agent_version, self.has_machine_agent,
self.machine_agent_version) = (node_id, name, node_type, machine_id, machine_name, os_type, unique_local_id,
tier_id, tier_name, has_app_agent, app_agent_version,
has_machine_agent, machine_agent_version)
class Nodes(JsonList):
def __init__(self, initial_list=None):
super(Nodes, self).__init__(Node, initial_list)
def __getitem__(self, i):
"""
:rtype: Node
"""
return self.data[i]
def by_machine_name(self, name):
"""
Filters a Nodes collection to return only the nodes matching the given hostname.
:param str name: Hostname to match against.
:returns: a Nodes collection filtered by hostname.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.machine_name == name])
def by_machine_id(self, machine_id):
"""
Filters a Nodes collection to return only the nodes matching the given machine instance ID.
:param int machine_id: Machine ID to match against.
:returns: a Nodes collection filtered by machine ID.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.machine_id == machine_id])
def by_tier_name(self, name):
"""
Filters a Nodes collection to return only the nodes belonging to the given tier.
:param str name: Tier name to match against.
:returns: a Nodes collection filtered by tier.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.tier_name == name])
def by_tier_id(self, tier_id):
"""
Filters a Nodes collection to return only the nodes belonging to the given tier ID.
:param int tier_id: Tier ID to match against.
:returns: a Nodes collection filtered by tier.
:rtype: Nodes
"""
return Nodes([x for x in self.data if x.tier_id == tier_id])
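# Example (sketch): the by_* helpers return Nodes, so filters chain naturally.
#   web_nodes = client.get_nodes(app_id=42).by_tier_name('web')
#   versions = [n.app_agent_version for n in web_nodes if n.has_app_agent]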
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/node.py
|
node.py
|
from . import JsonObject, JsonList
from appd.time import from_ts
class HourlyLicenseUsage(JsonObject):
FIELDS = {
'id': '',
'account_id': 'accountId',
'max_units_used': 'maxUnitsUsed',
'min_units_used': 'minUnitsUsed',
'avg_units_used': 'avgUnitsUsed',
'total_units_used': 'totalUnitsUsed',
'sample_count': 'sampleCount',
'avg_units_allowed': 'avgUnitsAllowed',
'avg_units_provisioned': 'avgUnitsProvisioned',
'license_module': 'agentType',
'created_on_ms': 'createdOn',
}
def __init__(self, id=0, account_id=0, max_units_used=0, min_units_used=0, avg_units_used=0, total_units_used=0,
sample_count=0, avg_units_allowed=0, avg_units_provisioned=None, license_module=None,
created_on_ms=0):
(self.id, self.account_id, self.max_units_used, self.min_units_used, self.avg_units_used, self.total_units_used,
self.sample_count, self.avg_units_allowed, self.avg_units_provisioned, self.license_module,
self.created_on_ms) = (id, account_id, max_units_used, min_units_used, avg_units_used, total_units_used,
sample_count, avg_units_allowed, avg_units_provisioned, license_module,
created_on_ms)
@property
def created_on(self):
"""
:rtype: datetime.datetime
"""
return from_ts(self.created_on_ms)
class HourlyLicenseUsageList(JsonList):
def __init__(self, initial_list=None):
super(HourlyLicenseUsageList, self).__init__(HourlyLicenseUsage, initial_list)
def __getitem__(self, i):
"""
:rtype: HourlyLicenseUsage
"""
return self.data[i]
def by_account_id(self, account_id):
"""
:rtype: HourlyLicenseUsageList
"""
return HourlyLicenseUsageList([x for x in self.data if x.account_id == account_id])
def by_license_module(self, license_module):
"""
:rtype: HourlyLicenseUsageList
"""
return HourlyLicenseUsageList([x for x in self.data if x.license_module == license_module])
class HourlyLicenseUsages(JsonObject):
FIELDS = {}
def __init__(self):
self.usages = HourlyLicenseUsageList()
@classmethod
def from_json(cls, json_dict):
obj = super(HourlyLicenseUsages, cls).from_json(json_dict)
obj.usages = HourlyLicenseUsageList.from_json(json_dict['usages'])
return obj
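# Example (sketch): drilling into the hourly usage returned by
# AppDynamicsClient.get_license_usage(); the module name is a placeholder.
#   usages = client.get_license_usage(account_id).usages
#   for u in usages.by_license_module('machine-agent'):
#       print(u.created_on, u.avg_units_used)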
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/hourly_license_usage.py
|
hourly_license_usage.py
|
from . import JsonObject, JsonList
class Tier(JsonObject):
FIELDS = {'id': '', 'name': '', 'description': '', 'type': '',
'node_count': 'numberOfNodes', 'agent_type': 'agentType'}
AGENT_TYPES = ('APP_AGENT', 'MACHINE_AGENT',
'DOT_NET_APP_AGENT', 'DOT_NET_MACHINE_AGENT',
'PHP_APP_AGENT', 'PHP_MACHINE_AGENT',
'NODEJS_APP_AGENT','NODEJS_MACHINE_AGENT',
'PYTHON_APP_AGENT',
'NATIVE_APP_AGENT','NATIVE_SDK', 'NATIVE_DYNAMIC','NATIVE_WEB_SERVER',
'DB_AGENT','DB_COLLECTOR',
'RUBY_APP_AGENT',
'SIM_MACHINE_AGENT','APM_MACHINE_AGENT','SERVICE_AVAIL_MACHINE_AGENT',
'APM_APP_AGENT',
'ANALYTICS_AGENT',
'GOLANG_SDK',
'WMB_AGENT')
def __init__(self, tier_id=0, name='', description='', agent_type='APP_AGENT', node_count=0,
tier_type='Java Application Server'):
self._agent_type = None
self.id, self.name, self.description, self.agent_type, self.node_count, self.type = \
tier_id, name, description, agent_type, node_count, tier_type
@property
def agent_type(self):
"""
:rtype: str
"""
return self._agent_type
@agent_type.setter
def agent_type(self, agent_type):
self._list_setter('_agent_type', agent_type, Tier.AGENT_TYPES)
class Tiers(JsonList):
def __init__(self, initial_list=None):
super(Tiers, self).__init__(Tier, initial_list)
def __getitem__(self, i):
"""
:rtype: Tier
"""
return self.data[i]
def by_agent_type(self, agent_type):
"""
Searches for tiers of a particular type (which should be one of Tier.AGENT_TYPES). For example, to find
all the Java app server tiers:
>>> from appd.request import AppDynamicsClient
>>> client = AppDynamicsClient(...)
>>> all_tiers = client.get_tiers()
>>> java_tiers = all_tiers.by_agent_type('APP_AGENT')
:returns: a Tiers object containing any tiers matching the criteria
:rtype: Tiers
"""
return Tiers([x for x in self.data if x.agent_type == agent_type])
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/tier.py
|
tier.py
|
from . import JsonObject, JsonList
from .entity_def import EntityDefinition
from appd.time import from_ts
class PolicyViolation(JsonObject):
FIELDS = {'id': '',
'name': '',
'description': '',
'status': 'incidentStatus',
'severity': '',
'start_time_ms': 'startTimeInMillis',
'end_time_ms': 'endTimeInMillis',
'detected_time_ms': 'detectedTimeInMillis',
'deep_link_url': 'deepLinkUrl'}
STATUSES = ('NOT_APPLICABLE', 'OPEN', 'RESOLVED')
SEVERITIES = ('INFO', 'WARNING', 'CRITICAL')
def __init__(self, pv_id=0, pv_name='', description='', status='OPEN', severity='INFO',
start_time_ms=0, end_time_ms=0, detected_time_ms=0, deep_link_url='',
affected_entity=None, triggered_entity=None):
self._severity = None
self._status = None
(self.id, self.name, self.description, self.status, self.severity, self.start_time_ms, self.end_time_ms,
self.detected_time_ms, self.deep_link_url, self.affected_entity, self.triggered_entity) = \
(pv_id, pv_name, description, status, severity, start_time_ms, end_time_ms, detected_time_ms,
deep_link_url, affected_entity, triggered_entity)
@classmethod
def _set_fields_from_json_dict(cls, obj, json_dict):
JsonObject._set_fields_from_json_dict(obj, json_dict)
obj.affected_entity = EntityDefinition.from_json(json_dict['affectedEntityDefinition'])
obj.triggered_entity = EntityDefinition.from_json(json_dict['triggeredEntityDefinition'])
@property
def status(self):
return self._status
@status.setter
def status(self, new_status):
self._list_setter('_status', new_status, PolicyViolation.STATUSES)
@property
def severity(self):
return self._severity
@severity.setter
def severity(self, new_sev):
self._list_setter('_severity', new_sev, PolicyViolation.SEVERITIES)
@property
def start_time(self):
return from_ts(self.start_time_ms)
@property
def end_time(self):
"""
Gets the end time of the violation, converting it from an AppDynamics timestamp to standard
Python :class:datetime.
:return: Time the violation was resolved
:rtype: datetime
"""
return from_ts(self.end_time_ms)
@property
def detected_time(self):
return from_ts(self.detected_time_ms)
class PolicyViolations(JsonList):
def __init__(self, initial_list=None):
super(PolicyViolations, self).__init__(PolicyViolation, initial_list)
def __getitem__(self, i):
"""
:rtype: PolicyViolation
"""
return self.data[i]
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/policy_violation.py
|
policy_violation.py
|
from . import JsonObject, JsonList
from appd.time import from_ts
class LicenseUsage(JsonObject):
FIELDS = {
'id': '',
'account_id': 'accountId',
'units_used': 'unitsUsed',
'units_allowed': 'unitsAllowed',
'units_provisioned': 'unitsProvisioned',
'license_module': 'agentType',
'created_on_ms': 'createdOn',
}
def __init__(self, id=0, account_id=0, license_module=None, units_used=0,
units_allowed=0, units_provisioned=None, created_on_ms=0):
(self.id, self.account_id, self.license_module, self.units_used,
self.units_allowed, self.units_provisioned, self.created_on_ms) = (id, account_id, license_module,
units_used, units_allowed,
units_provisioned, created_on_ms)
@property
def created_on(self):
"""
:rtype: datetime.datetime
"""
return from_ts(self.created_on_ms)
class LicenseUsageList(JsonList):
def __init__(self, initial_list=None):
super(LicenseUsageList, self).__init__(LicenseUsage, initial_list)
def __getitem__(self, i):
"""
:rtype: LicenseUsage
"""
return self.data[i]
def by_account_id(self, account_id):
"""
:rtype: LicenseUsageList
"""
return LicenseUsageList([x for x in self.data if x.account_id == account_id])
def by_license_module(self, license_module):
"""
:rtype: LicenseUsageList
"""
return LicenseUsageList([x for x in self.data if x.license_module == license_module])
class LicenseUsages(JsonObject):
FIELDS = {}
def __init__(self):
self.usages = LicenseUsageList()
@classmethod
def from_json(cls, json_dict):
obj = super(LicenseUsages, cls).from_json(json_dict)
obj.usages = LicenseUsageList.from_json(json_dict['usages'])
return obj
|
AppDynamicsRESTx
|
/AppDynamicsRESTx-0.4.22.tar.gz/AppDynamicsRESTx-0.4.22/appd/model/license_usage.py
|
license_usage.py
|
import re
import os
import shutil
import sys
import time
import xml.etree.ElementTree as ET
from os.path import abspath, dirname, exists, expanduser, isdir, isfile, join
from egginst.utils import rm_rf, get_executable
from freedesktop import make_desktop_entry, make_directory_entry
# datadir: contains the desktop and directory entries
# confdir: contains the XML menu files
sys_menu_file = '/etc/xdg/menus/applications.menu'
if os.getuid() == 0:
mode = 'system'
datadir = '/usr/share'
confdir = '/etc/xdg'
else:
mode = 'user'
datadir = os.environ.get('XDG_DATA_HOME',
abspath(expanduser('~/.local/share')))
confdir = os.environ.get('XDG_CONFIG_HOME',
abspath(expanduser('~/.config')))
appdir = join(datadir, 'applications')
menu_file = join(confdir, 'menus/applications.menu')
def indent(elem, level=0):
"""
adds whitespace to the tree, so that it results in a pretty printed tree
"""
XMLindentation = "    "  # 4 spaces, just like in Python!
i = "\n" + level * XMLindentation
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + XMLindentation
for e in elem:
indent(e, level+1)
if not e.tail or not e.tail.strip():
e.tail = i + XMLindentation
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def add_child(parent, tag, text=None):
"""
Add a child element of specified tag type to parent.
The new child element is returned.
"""
elem = ET.SubElement(parent, tag)
if text is not None:
elem.text = text
return elem
def is_valid_menu_file():
try:
root = ET.parse(menu_file).getroot()
assert root is not None and root.tag == 'Menu'
return True
except Exception:
return False
def write_menu_file(tree):
indent(tree.getroot())
fo = open(menu_file, 'w')
fo.write("""\
<!DOCTYPE Menu PUBLIC '-//freedesktop//DTD Menu 1.0//EN'
'http://standards.freedesktop.org/menu-spec/menu-1.0.dtd'>
""")
tree.write(fo)
fo.write('\n')
fo.close()
def ensure_menu_file():
# ensure any existing version is a file
if exists(menu_file) and not isfile(menu_file):
rm_rf(menu_file)
# ensure any existing file is actually a menu file
if isfile(menu_file):
# make a backup of the menu file to be edited
cur_time = time.strftime('%Y-%m-%d_%Hh%Mm%S')
backup_menu_file = "%s.%s" % (menu_file, cur_time)
shutil.copyfile(menu_file, backup_menu_file)
if not is_valid_menu_file():
os.remove(menu_file)
# create a new menu file if one doesn't yet exist
if not isfile(menu_file):
fo = open(menu_file, 'w')
if mode == 'user':
merge = '<MergeFile type="parent">%s</MergeFile>' % sys_menu_file
else:
merge = ''
fo.write("<Menu><Name>Applications</Name>%s</Menu>\n" % merge)
fo.close()
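# In user mode the file written above is roughly:
#   <Menu><Name>Applications</Name>
#     <MergeFile type="parent">/etc/xdg/menus/applications.menu</MergeFile>
#   </Menu>
# with the MergeFile element omitted in system mode.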
class Menu(object):
def __init__(self, name):
self.name = name
self.name_ = name + '_'
self.entry_fn = '%s.directory' % self.name
self.entry_path = join(datadir, 'desktop-directories', self.entry_fn)
def create(self):
self._create_dirs()
self._create_directory_entry()
if is_valid_menu_file() and self._has_this_menu():
return
ensure_menu_file()
self._add_this_menu()
def remove(self):
rm_rf(self.entry_path)
for fn in os.listdir(appdir):
if fn.startswith(self.name_):
# found one shortcut, so don't remove the name from menu
return
self._remove_this_menu()
def _remove_this_menu(self):
tree = ET.parse(menu_file)
root = tree.getroot()
for elt in root.findall('Menu'):
if elt.find('Name').text == self.name:
root.remove(elt)
write_menu_file(tree)
def _has_this_menu(self):
root = ET.parse(menu_file).getroot()
return any(e.text == self.name for e in root.findall('Menu/Name'))
def _add_this_menu(self):
tree = ET.parse(menu_file)
root = tree.getroot()
menu_elt = add_child(root, 'Menu')
add_child(menu_elt, 'Name', self.name)
add_child(menu_elt, 'Directory', self.entry_fn)
inc_elt = add_child(menu_elt, 'Include')
add_child(inc_elt, 'Category', self.name)
write_menu_file(tree)
def _create_directory_entry(self):
# Create the menu resources. Note that the .directory files all go
# in the same directory.
d = dict(name=self.name, path=self.entry_path)
try:
import custom_tools
icon_path = join(dirname(custom_tools.__file__), 'menu.ico')
if isfile(icon_path):
d['icon'] = icon_path
except ImportError:
pass
make_directory_entry(d)
def _create_dirs(self):
# Ensure the three directories we're going to write menu and shortcut
# resources to all exist.
for dir_path in [dirname(menu_file),
dirname(self.entry_path),
appdir]:
if not isdir(dir_path):
os.makedirs(dir_path)
class ShortCut(object):
def __init__(self, menu, shortcut, prefix=None):
# note that this is the path WITHOUT extension
fn = menu.name_ + shortcut['id']
self.path = join(appdir, fn)
shortcut['categories'] = menu.name
self.shortcut = shortcut
for var_name in ('name', 'cmd'):
if var_name in shortcut:
setattr(self, var_name, shortcut[var_name])
self.prefix = prefix if prefix is not None else sys.prefix
def create(self):
self._install_desktop_entry('gnome')
self._install_desktop_entry('kde')
def remove(self):
for ext in ('.desktop', 'KDE.desktop'):
path = self.path + ext
rm_rf(path)
def _install_desktop_entry(self, tp):
# Handle the special placeholders in the specified command. For a
# filebrowser request, we simply used the passed filebrowser. But
# for a webbrowser request, we invoke the Python standard lib's
# webbrowser script so we can force the url(s) to open in new tabs.
spec = self.shortcut.copy()
spec['tp'] = tp
path = self.path
if tp == 'gnome':
filebrowser = 'gnome-open'
path += '.desktop'
elif tp == 'kde':
filebrowser = 'kfmclient openURL'
path += 'KDE.desktop'
cmd = self.cmd
if cmd[0] == '{{FILEBROWSER}}':
cmd[0] = filebrowser
elif cmd[0] == '{{WEBBROWSER}}':
import webbrowser
executable = get_executable(self.prefix)
cmd[0:1] = [executable, webbrowser.__file__, '-t']
spec['cmd'] = cmd
spec['path'] = path
# create the shortcuts
make_desktop_entry(spec)
if __name__ == '__main__':
rm_rf(menu_file)
Menu('Foo').create()
Menu('Bar').create()
Menu('Foo').remove()
Menu('Foo').remove()
|
AppInst
|
/AppInst-2.1.5.tar.gz/AppInst-2.1.5/appinst/linux2.py
|
linux2.py
|
import os
import sys
from os.path import isdir, join
from egginst.utils import rm_empty_dir, rm_rf, get_executable
import wininst
try:
from custom_tools.msi_property import get
mode = ('user', 'system')[get('ALLUSERS') == '1']
addtodesktop = bool(get('ADDTODESKTOP') == '1')
addtolauncher = bool(get('ADDTOLAUNCHER') == '1')
except ImportError:
mode = 'user'
addtodesktop = True
addtolauncher = True
quicklaunch_dir = join(wininst.get_special_folder_path('CSIDL_APPDATA'),
"Microsoft", "Internet Explorer", "Quick Launch")
if mode == 'system':
desktop_dir = wininst.get_special_folder_path(
'CSIDL_COMMON_DESKTOPDIRECTORY')
start_menu = wininst.get_special_folder_path('CSIDL_COMMON_PROGRAMS')
else:
desktop_dir = wininst.get_special_folder_path('CSIDL_DESKTOPDIRECTORY')
start_menu = wininst.get_special_folder_path('CSIDL_PROGRAMS')
def quoted(s):
"""
Quotes a string (i.e. one containing spaces) if necessary, stripping any existing quotes first.
"""
# strip any existing quotes
s = s.strip('"')
if ' ' in s:
return '"%s"' % s
else:
return s
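# Example (hypothetical values): quoted('C:\Program Files\python.exe')
# returns '"C:\Program Files\python.exe"', while quoted('python.exe')
# is returned unchanged.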
class Menu(object):
def __init__(self, name):
self.path = join(start_menu, name)
def create(self):
if not isdir(self.path):
os.mkdir(self.path)
def remove(self):
rm_empty_dir(self.path)
class ShortCut(object):
def __init__(self, menu, shortcut, prefix=None):
"""
Prefix is the system prefix to be used -- this is needed since
a different Python installation's packages may be the ones being managed.
"""
self.menu = menu
self.shortcut = shortcut
self.prefix = prefix if prefix is not None else sys.prefix
self.cmd = shortcut['cmd']
def remove(self):
self.create(remove=True)
def create(self, remove=False):
# Separate the arguments to the invoked command from the command
# itself.
cmd = self.cmd[0]
args = self.cmd[1:]
executable = get_executable(self.prefix)
# Handle the special '{{FILEBROWSER}}' command by using webbrowser
# since using just the path name pops up a dialog asking for which
# application to use. Using 'explorer.exe' picks up
# c:/windows/system32/explorer.exe which does not work. Webbrowser
# does the right thing.
if cmd == '{{FILEBROWSER}}':
cmd = executable
args = ['-m', 'webbrowser'] + args
# Otherwise, handle the special '{{WEBBROWSER}}' command by
# invoking the Python standard lib's 'webbrowser' script. This
# allows us to specify that the url(s) should be opened in new
# tabs.
#
# If this doesn't work, see the following website for details of
# the special URL shortcut file format. While split across two
# lines it is one URL:
# http://delphi.about.com/gi/dynamic/offsite.htm?site= \
# http://www.cyanwerks.com/file-format-url.html
elif cmd == '{{WEBBROWSER}}':
cmd = executable
args = ['-m', 'webbrowser', '-t'] + args
# The API for the call to 'wininst.create_shortcut' has 3 required
# arguments:-
#
# path, description and filename
#
# and 4 optional arguments:-
#
# args, working_dir, icon_path and icon_index
#
# We always pass the args argument, but we only pass the working
# directory and the icon path if given, and we never currently pass the
# icon index.
working_dir = quoted(self.shortcut.get('working_dir', ''))
icon = self.shortcut.get('icon')
if working_dir and icon:
shortcut_args = [working_dir, icon]
elif working_dir and not icon:
shortcut_args = [working_dir]
elif not working_dir and icon:
shortcut_args = ['', icon]
else:
shortcut_args = []
# Menu link
dst_dirs = [self.menu.path]
# Desktop link
if self.shortcut.get('desktop') and addtodesktop:
dst_dirs.append(desktop_dir)
# Quicklaunch link
if self.shortcut.get('quicklaunch') and addtolauncher:
dst_dirs.append(quicklaunch_dir)
for dst_dir in dst_dirs:
dst = join(dst_dir, self.shortcut['name'] + '.lnk')
if remove:
rm_rf(dst)
else:
wininst.create_shortcut(
quoted(cmd),
self.shortcut['comment'],
dst,
' '.join(quoted(arg) for arg in args),
*shortcut_args)
|
AppInst
|
/AppInst-2.1.5.tar.gz/AppInst-2.1.5/appinst/win32.py
|
win32.py
|
import os
import shutil
from os.path import basename, dirname, isdir, join
from plistlib import Plist, writePlist
from egginst.utils import rm_empty_dir, rm_rf
class Menu(object):
def __init__(self, name):
self.path = join('/Applications', name)
def create(self):
if not isdir(self.path):
os.mkdir(self.path)
def remove(self):
rm_empty_dir(self.path)
class ShortCut(object):
def __init__(self, menu, shortcut, prefix=None):
self.menu = menu
self.shortcut = shortcut
self.prefix = prefix
for var_name in ('name', 'cmd'):
if var_name in shortcut:
setattr(self, var_name, shortcut[var_name])
if os.access(self.cmd[0], os.X_OK):
self.tp = 'app'
self.path = join(menu.path, self.name + '.app')
else:
self.tp = 'link'
self.path = join(menu.path, self.name)
def remove(self):
rm_rf(self.path)
def create(self):
if self.tp == 'app':
Application(self.path, self.shortcut).create()
elif self.tp == 'link':
src = self.cmd[0]
if src.startswith('{{'):
src = self.cmd[1]
rm_rf(self.path)
os.symlink(src, self.path)
TERMINAL = '''\
#!/bin/sh
mypath="`dirname "$0"`"
osascript << EOF
tell application "System Events" to set terminalOn to (exists process "Terminal")
tell application "Terminal"
if (terminalOn) then
activate
do script "\\"$mypath/startup.command\\"; exit"
else
do script "\\"$mypath/startup.command\\"; exit" in front window
end if
end tell
EOF
exit 0
'''
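# The TERMINAL script above is a shell wrapper: it uses osascript to ask
# Terminal.app to run the generated "startup.command" script, so that
# shortcuts marked as needing a terminal open in a Terminal window.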
class Application(object):
"""
Class for creating an application folder on OSX. The application may be
a standalone executable, but is more likely a Python script which is
interpreted by the framework Python interpreter.
"""
def __init__(self, app_path, shortcut):
"""
Required:
---------
shortcut is a dictionary defining a shortcut per the AppInst standard.
"""
# Store the required values out of the shortcut definition.
self.app_path = app_path
self.cmd = shortcut['cmd']
self.name = shortcut['name']
# Store some optional values out of the shortcut definition.
self.icns_path = shortcut.get('icns', None)
self.terminal = shortcut.get('terminal', False)
self.version = shortcut.get('version', '1.0.0')
# Calculate some derived values just once.
self.contents_dir = join(self.app_path, 'Contents')
self.resources_dir = join(self.contents_dir, 'Resources')
self.macos_dir = join(self.contents_dir, 'MacOS')
self.executable = self.name
self.executable_path = join(self.macos_dir, self.executable)
def create(self):
self._create_dirs()
self._write_pkginfo()
self._write_icns()
self._writePlistInfo()
self._write_script()
def _create_dirs(self):
rm_rf(self.app_path)
os.makedirs(self.resources_dir)
os.makedirs(self.macos_dir)
def _write_pkginfo(self):
fo = open(join(self.contents_dir, 'PkgInfo'), 'w')
fo.write(('APPL%s????' % self.name.replace(' ', ''))[:8])
fo.close()
def _write_icns(self):
if self.icns_path is None:
# Use the default icon if no icns file was specified.
self.icns_path = join(dirname(__file__), 'PythonApplet.icns')
shutil.copy(self.icns_path, self.resources_dir)
def _writePlistInfo(self):
"""
Writes the Info.plist file in the Contents directory.
"""
pl = Plist(
CFBundleExecutable=self.executable,
CFBundleGetInfoString='%s-%s' % (self.name, self.version),
CFBundleIconFile=basename(self.icns_path),
CFBundleIdentifier='com.%s' % self.name,
CFBundlePackageType='APPL',
CFBundleVersion=self.version,
CFBundleShortVersionString=self.version,
)
writePlist(pl, join(self.contents_dir, 'Info.plist'))
def _write_script(self):
"""
Copies a python script (which starts the application) into the
application folder (into Contents/MacOS) and makes sure the script
uses sys.executable, which should be the "framework Python".
"""
shell = "#!/bin/sh\n%s\n" % ' '.join(self.cmd)
if self.terminal:
path = join(self.macos_dir, 'startup.command')
fo = open(path, 'w')
fo.write(shell)
fo.close()
os.chmod(path, 0755)
data = TERMINAL
else:
data = shell
fo = open(self.executable_path, 'w')
fo.write(data)
fo.close()
os.chmod(self.executable_path, 0755)
|
AppInst
|
/AppInst-2.1.5.tar.gz/AppInst-2.1.5/appinst/darwin.py
|
darwin.py
|
import sys
from os.path import abspath, dirname, isfile, join
from egginst.utils import bin_dir_name
# The custom_tools package is importable when the Python was created by an
# "enicab" installer, in which case the directory custom_tools contains
# platform-independent install information in __init__.py and platform-specific
information about user settings chosen during the install process.
try:
import custom_tools
menu_name = custom_tools.FULL_NAME
except ImportError:
menu_name = 'Python-%i.%i' % sys.version_info[:2]
def install(shortcuts, remove, prefix=None):
"""
install Menu and shortcuts
"""
if sys.platform == 'linux2':
from linux2 import Menu, ShortCut
elif sys.platform == 'darwin':
from darwin import Menu, ShortCut
elif sys.platform == 'win32':
from win32 import Menu, ShortCut
m = Menu(menu_name)
if remove:
for sc in shortcuts:
ShortCut(m, sc, prefix=prefix).remove()
m.remove()
else:
m.create()
for sc in shortcuts:
ShortCut(m, sc, prefix=prefix).create()
def transform_shortcut(dat_dir, sc, prefix=None):
"""
transform the shortcut's relative paths to absolute paths
"""
prefix = prefix if prefix is not None else sys.prefix
# Make the path to the executable absolute
bin = sc['cmd'][0]
if bin.startswith('..'):
bin = abspath(join(dat_dir, bin))
elif not bin.startswith('{{'):
bin = join(prefix, bin_dir_name, bin)
sc['cmd'][0] = bin
if (sys.platform == 'win32' and sc['terminal'] is False and
not bin.startswith('{{') and isfile(bin + '-script.py')):
argv = [join(prefix, 'pythonw.exe'), bin + '-script.py']
argv.extend(sc['cmd'][1:])
sc['cmd'] = argv
# Make the paths to the icon files absolute
for kw in ('icon', 'icns'):
if kw in sc:
sc[kw] = abspath(join(dat_dir, sc[kw]))
def get_shortcuts(dat_path, prefix=None):
"""
reads and parses the appinst data file and returns the shortcuts
"""
d = {}
execfile(dat_path, d)
shortcuts = d['SHORTCUTS']
for sc in shortcuts:
transform_shortcut(dirname(dat_path), sc, prefix=prefix)
return shortcuts
def install_from_dat(dat_path, prefix=None):
"""
does a complete install given a data file; the prefix is the system
prefix to use.
"""
install(get_shortcuts(dat_path, prefix=prefix), remove=False, prefix=prefix)
def uninstall_from_dat(dat_path, prefix=None):
"""
uninstalls all items in a data file; the prefix is the system prefix to
use.
"""
install(get_shortcuts(dat_path, prefix=prefix), remove=True, prefix=prefix)
|
AppInst
|
/AppInst-2.1.5.tar.gz/AppInst-2.1.5/appinst/__init__.py
|
__init__.py
|
from __future__ import print_function, unicode_literals
from gi.repository import Gtk, Gdk, WebKit
import sys
import os
import multiprocessing
from flask import Flask
import socket
from urllib2 import urlopen, HTTPError, URLError
class App(Gtk.Application):
"""App
Application class
"""
def __init__(self, module=None):
Gtk.Application.__init__(self)
self.server = Flask(module)
self.route = self.server.route
self.root_dir = os.path.abspath(
os.path.dirname(module)
)
def do_startup(self):
"""Gtk.Application.run() will call this function()"""
Gtk.Application.do_startup(self)
gtk_window = Gtk.ApplicationWindow(application=self)
gtk_window.set_title('AppKit')
webkit_web_view = WebKit.WebView()
webkit_web_view.load_uri('http://localhost:' + str(self.port))
screen = Gdk.Screen.get_default()
monitor_geometry = screen.get_primary_monitor()
monitor_geometry = screen.get_monitor_geometry(monitor_geometry)
settings = webkit_web_view.get_settings()
settings.set_property('enable-universal-access-from-file-uris', True)
settings.set_property('enable-file-access-from-file-uris', True)
settings.set_property('default-encoding', 'utf-8')
gtk_window.set_default_size(
monitor_geometry.width * 1.0 / 2.0,
monitor_geometry.height * 3.0 / 5.0,
)
scrollWindow = Gtk.ScrolledWindow()
scrollWindow.add(webkit_web_view)
gtk_window.add(scrollWindow)
gtk_window.connect('delete-event', self._on_gtk_window_destroy)
gtk_window.show_all()
webkit_web_view.connect('notify::title', self._on_notify_title)
self.gtk_window = gtk_window
self.webkit_web_view = webkit_web_view
def do_activate(self):
"""Gtk.Application.run() will call this function()
after do_startup()
"""
pass
def _on_gtk_window_destroy(self, window, *args, **kwargs):
self.server_process.terminate()
def _on_notify_title(
self,
webkit_web_view,
g_param_string,
*args, **kwargs):
print('on_notify_title')
title = webkit_web_view.get_title()
if title is not None:
self.gtk_window.set_title(title)
def _run_server(self, publish=False, port=None, debug=False):
if port is None:
sock = socket.socket()
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
if publish:
host = '0.0.0.0'
else:
host = 'localhost'
process = multiprocessing.Process(
target=self.server.run,
args=(host, port, debug),
kwargs={'use_reloader': False},
)
process.start()
return (process, port)
def _check_server(self, port=None):
port = str(port)
# This code could be replaced by using a signal between
# the http server and GIO network
while True:
try:
urlopen('http://localhost:' + port)
break
except HTTPError as e:
print(e)
break
except URLError as e:
pass
def run(self, publish=False, port=None, debug=False, *args, **kw):
(self.server_process, self.port) = self._run_server(
publish=publish,
port=port,
debug=debug
)
self._check_server(port=self.port)
exit_status = super(App, self).run(sys.argv)
sys.exit(exit_status)
|
AppKit
|
/AppKit-0.2.8.tar.gz/AppKit-0.2.8/appkit/app.py
|
app.py
|
from gi.repository import Gtk, WebKit
import urlparse
import os
import tempfile
import mimetypes
import codecs
import re
from bs4 import BeautifulSoup
Gtk.init('')
class App(object):
"""App
Application class
"""
url_pattern = dict()
debug = False
def __init__(self, module_path=None):
app_dir = os.path.abspath(os.path.dirname(module_path))
window = Gtk.Window()
window.set_title('AppKit')
webkit_web_view = WebKit.WebView()
settings = webkit_web_view.get_settings()
settings.set_property('enable-universal-access-from-file-uris', True)
settings.set_property('enable-file-access-from-file-uris', True)
settings.set_property('default-encoding', 'utf-8')
window.set_default_size(800, 600)
scrollWindow = Gtk.ScrolledWindow()
scrollWindow.add(webkit_web_view)
window.add(scrollWindow)
window.connect('destroy', Gtk.main_quit)
webkit_web_view.connect(
'notify::load-status',
self.on_notify_load_status)
webkit_web_view.connect(
'resource-request-starting',
self.on_web_view_resource_request_starting)
webkit_web_view.connect(
'resource-response-received',
self.on_web_view_resource_response_received)
webkit_web_view.connect(
'resource-load-finished',
self.on_web_view_resource_load_finished)
webkit_web_view.connect(
'navigation_policy_decision_requested',
self.on_navigation_policy_decision_requested)
webkit_main_frame = webkit_web_view.get_main_frame()
webkit_main_frame.connect(
'resource-request-starting',
self.on_web_frame_resource_request_starting)
webkit_main_frame.connect(
'resource-response-received',
self.on_web_frame_resource_response_received)
webkit_main_frame.connect(
'resource-load-finished',
self.on_web_frame_resource_load_finished)
webkit_main_frame.connect(
'resource-load-failed',
self.on_web_frame_resource_load_failed)
window.show_all()
self.window = window
self.webkit_web_view = webkit_web_view
self.webkit_main_frame = webkit_main_frame
self.app_dir = app_dir
def _url_map_to_function(self, url):
match_list = list()
for pattern in self.url_pattern:
m = re.match(pattern, url)
if m:
match_list.append(m)
if len(match_list) > 1:
raise Exception('Found more than one matching url')
try:
m = match_list[0]
except IndexError:
return None
args = list(m.groups())
kw = m.groupdict()
for value in kw.values():
args.remove(value)
return self.url_pattern[m.re.pattern](*args, **kw)
def route(self, pattern=None):
def decorator(fn):
self.url_pattern[pattern] = fn
return fn
return decorator
def on_notify_load_status(self, webkitView, *args, **kwargs):
"""Callback function when the page was loaded completely
FYI, this function will be called after $(document).ready()
in jQuery
"""
status = webkitView.get_load_status()
if status == status.FINISHED:
if self.debug is True:
print 'Load finished'
def on_navigation_policy_decision_requested(
self,
webkit_web_view,
webkit_web_frame,
webkit_network_request,
webkit_web_navigation_action,
webkit_web_policy_dicision):
if self.debug is True:
print 'on_navigation_policy_decision_requested'
def on_web_view_resource_request_starting(
self,
web_view,
web_frame,
web_resource,
network_request,
network_response=None):
if self.debug is True:
print 'on_web_view_resource_request_starting'
def on_web_view_resource_response_received(
self,
web_view,
web_frame,
web_resource,
network_response,
*arg, **kw):
if self.debug is True:
print 'on_web_view_resource_response_received'
def on_web_view_resource_load_finished(
self,
web_view, web_frame, web_resource,
*args, **kw):
if self.debug is True:
print 'on_web_view_resource_load_finished'
def on_web_frame_resource_request_starting(
self,
web_frame,
web_resource,
network_request,
network_response=None):
if self.debug is True:
print 'on_web_frame_resource_request_starting'
url = urlparse.unquote(network_request.get_uri())
url = urlparse.urlparse(url.decode('utf-8'))
if url.netloc == '':
# Try mapping the request path to a function.
# If there's no mapped function, serve the path as a static file.
response = make_response(self._url_map_to_function(url.path))
if response:
(content, mimetype) = response
file_ext = mimetypes.guess_extension(mimetype)
tmp_file_path = tempfile.mkstemp(suffix=file_ext)[1]
f = codecs.open(tmp_file_path, 'w', encoding='utf-8')
f.write(content)
f.close()
network_request.set_uri('file://' + tmp_file_path + '?tmp=1')
else:
# A bit of a hack on the request url:
# remove the self.app_dir string from the url.
# This case happens when a resource is referenced by static files
# in relative path format ('./<path>'),
# e.g. images referenced from CSS.
url_path = re.sub(self.app_dir, '', url.path)
# Remove the /tmp/ component from the url.
# This case happens with files which were opened directly
# from the controller.
splitted_path = url_path.split('/')
if splitted_path[1] == 'tmp':
splitted_path.pop(1)
url_path = os.path.join(*splitted_path)
file_path = os.path.join(self.app_dir, url_path)
file_path = os.path.normcase(file_path)
file_path = os.path.normpath(file_path)
if not(os.path.exists(file_path)):
raise Exception('Not found: ' + file_path)
network_request.set_uri('file://' + file_path)
def on_web_frame_resource_response_received(
self,
web_frame,
web_resource,
network_response,
*arg, **kw):
if self.debug is True:
print 'on_web_frame_resource_response_received'
url = urlparse.urlparse(network_response.get_uri())
url = urlparse.urlparse(url.path)
query = urlparse.parse_qs(url.query)
if 'tmp' in query:
os.remove(url.path)
def on_web_frame_resource_load_finished(
self,
web_frame,
web_resource,
*arg, **kw):
if self.debug is True:
print 'on_web_frame_resource_load_finished'
def on_web_frame_resource_load_failed(
self,
web_frame,
web_resource,
*arg, **kw):
if self.debug is True:
print 'on_web_frame_resource_load_failed'
def _init_ui(self):
"""Initial the first UI page.
- load html from '/' endpoint
- if <title> is defined, use as windows title
"""
(content, mimetype) = make_response(self._url_map_to_function('/'))
try:
beautifulsoup = BeautifulSoup(content)
self.window.set_title(beautifulsoup.find('title').string)
except Exception:
# no <title> tag found; keep the default window title
pass
if self.debug is True:
print self.app_dir
# Use load_string instead of load_uri because the latter shows a warning.
self.webkit_web_view.load_string(
content,
mime_type=mimetype,
encoding='utf-8',
base_uri='/',
)
def run(self):
self._init_ui()
Gtk.main()
def make_response(response):
"""Make response tuple
Potential features to be added
- Parameters validation
"""
if isinstance(response, unicode) or \
isinstance(response, str):
response = (response, 'text/html')
return response
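# Example: make_response('<h1>hi</h1>') returns ('<h1>hi</h1>', 'text/html');
# a (content, mimetype) tuple passed in is returned unchanged.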
|
AppKit
|
/AppKit-0.2.8.tar.gz/AppKit-0.2.8/appkit/api/v0_2_4/app.py
|
app.py
|
import logging
import os.path
from datetime import date
class SingleLevelFilter(logging.Filter):
def __init__(self, pass_level, reject):
self.pass_level = pass_level
self.reject = reject
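# Note: despite its name, reject=True makes the filter pass ONLY records
# at pass_level, while reject=False passes every level EXCEPT pass_level
# (this matches how the handlers in AppLogger below use it).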
def filter(self, record):
if self.reject:
return record.levelno == self.pass_level
else:
return record.levelno != self.pass_level
class AppLogger:
app_logger = ''
@staticmethod
def __init__(config):
today_date_str = str(date.today())
app_root_path = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
logger_folder = config['log_folder']
logger_dir_path = app_root_path + '/' + logger_folder + '/' + today_date_str
# checking for logger folder
if not os.path.exists(logger_dir_path):
os.makedirs(logger_dir_path)
AppLogger.logger_dir_path = logger_dir_path
# logging format
AppLogger.logging_format = "%(asctime)s %(name)-12s %(levelname)-8s %(message)s\n\r"
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
"""handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)"""
AppLogger.app_logger = logger
AppLogger.register_handler()
@staticmethod
def register_handler():
AppLogger.add_debug_handler()
AppLogger.add_info_handler()
@staticmethod
def add_info_handler():
today_date_str = str(date.today())
# info logger file name
log_file_name_info = "info_"+today_date_str+".log"
# create info file handler and set level to info
info_fh = logging.FileHandler(os.path.join(AppLogger.logger_dir_path, log_file_name_info), "a")
info_fh.setLevel(logging.INFO)
formatter = logging.Formatter(AppLogger.logging_format)
info_fh.setFormatter(formatter)
# added filter for logging only INFO messages
info_filter = SingleLevelFilter(logging.INFO, True)
info_fh.addFilter(info_filter)
AppLogger.app_logger.addHandler(info_fh)
@staticmethod
def add_debug_handler():
today_date_str = str(date.today())
# debug logger file name
log_file_name_debug = "all_"+today_date_str+".log"
# create debug file handler and set level to debug
debug_fh = logging.FileHandler(os.path.join(AppLogger.logger_dir_path, log_file_name_debug), "a")
debug_fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(AppLogger.logging_format)
debug_fh.setFormatter(formatter)
# added filter for not logging INFO messages
info_filter = SingleLevelFilter(logging.INFO, False)
debug_fh.addFilter(info_filter)
AppLogger.app_logger.addHandler(debug_fh)
@staticmethod
def info(msg):
logging.info(msg)
@staticmethod
def warning(msg):
logging.warning(msg)
@staticmethod
def error(msg):
logging.error(msg)
@staticmethod
def critical(msg):
logging.critical(msg)
@staticmethod
def exception(msg):
logging.exception(msg)
@staticmethod
def debug(msg):
# if AppLogger.app_logger.isEnabledFor(logging.DEBUG):
logging.debug(msg)
@staticmethod
def start(msg):
logging.info('-------------------------------------')
logging.info(msg)
logging.info('-------------------------------------')
@staticmethod
def end(msg):
logging.info('-------------------------------------')
logging.info(msg)
logging.info('-------------------------------------')
|
AppLogger
|
/AppLogger-1.2.0.tar.gz/AppLogger-1.2.0/AppLogger.py
|
AppLogger.py
|
AppMetrics
++++++++++
.. image:: https://travis-ci.org/avalente/appmetrics.png?branch=master
:target: https://travis-ci.org/avalente/appmetrics
:alt: Build status
.. image:: https://coveralls.io/repos/avalente/appmetrics/badge.png
:target: https://coveralls.io/r/avalente/appmetrics
:alt: Code coverage
``AppMetrics`` is a python library used to collect useful run-time application metrics, based on
`Folsom from Boundary <https://github.com/boundary/folsom>`_, which is in turn inspired by
`Metrics from Coda Hale <https://github.com/codahale/metrics>`_.
The library's purpose is to help you collect real-time metrics from your Python applications,
be they web apps, long-running batches or anything else. ``AppMetrics`` is not a persistent store;
you must provide your own persistence layer, for example by using well-established monitoring tools.
``AppMetrics`` works on python 2.7 and 3.3.
Getting started
---------------
Install ``AppMetrics`` into your python environment::
pip install appmetrics
or, if you don't use ``pip``, download and unpack the package and then::
python setup.py install
Once you have installed ``AppMetrics`` you can access it via the ``metrics`` module::
>>> from appmetrics import metrics
>>> histogram = metrics.new_histogram("test")
>>> histogram.notify(1.0)
True
>>> histogram.notify(2.0)
True
>>> histogram.notify(3.0)
True
>>> histogram.get()
{'arithmetic_mean': 2.0, 'kind': 'histogram', 'skewness': 0.0, 'harmonic_mean': 1.6363636363636365, 'min': 1.0, 'standard_deviation': 1.0, 'median': 2.0, 'histogram': [(3.0, 3), (5.0, 0)], 'percentile': [(50, 2.0), (75, 2.0), (90, 3.0), (95, 3.0), (99, 3.0), (99.9, 3.0)], 'n': 3, 'max': 3.0, 'variance': 1.0, 'geometric_mean': 1.8171205928321397, 'kurtosis': -2.3333333333333335}
Basically you create a new metric by using one of the ``metrics.new_*`` functions. The metric will be stored in
an internal registry, so you can access it from different places in your application::
>>> test_histogram = metrics.metric("test")
>>> test_histogram.notify(4.0)
True
The ``metrics`` registry is thread-safe, you can safely use it in multi-threaded web servers.
Using the ``with_histogram`` decorator we can time a function::
>>> import time, random
>>> @metrics.with_histogram("test")
... def my_worker():
... time.sleep(random.random())
...
>>> my_worker()
>>> my_worker()
>>> my_worker()
and let's see the results::
>>> metrics.get("test")
{'arithmetic_mean': 0.41326093673706055, 'kind': 'histogram', 'skewness': 0.2739718270714368, 'harmonic_mean': 0.14326954591313346, 'min': 0.0613858699798584, 'standard_deviation': 0.4319169569113129, 'median': 0.2831099033355713, 'histogram': [(1.0613858699798584, 3), (2.0613858699798584, 0)], 'percentile': [(50, 0.2831099033355713), (75, 0.2831099033355713), (90, 0.895287036895752), (95, 0.895287036895752), (99, 0.895287036895752), (99.9, 0.895287036895752)], 'n': 3, 'max': 0.895287036895752, 'variance': 0.18655225766752892, 'geometric_mean': 0.24964828731906127, 'kurtosis': -2.3333333333333335}
It is also possible to time specific sections of the code by using the ``timer`` context manager::
>>> import time, random
... def my_worker():
... with metrics.timer("test"):
... time.sleep(random.random())
...
Let's print the metrics data on the screen every 5 seconds::
>>> from appmetrics import reporter
>>> def stdout_report(metrics):
... print metrics
...
>>> reporter.register(stdout_report, reporter.fixed_interval_scheduler(5))
'5680173c-0279-46ec-bd88-b318f8058ef4'
>>> {'test': {'arithmetic_mean': 0.0, 'kind': 'histogram', 'skewness': 0.0, 'harmonic_mean': 0.0, 'min': 0, 'standard_deviation': 0.0, 'median': 0.0, 'histogram': [(0, 0)], 'percentile': [(50, 0.0), (75, 0.0), (90, 0.0), (95, 0.0), (99, 0.0), (99.9, 0.0)], 'n': 0, 'max': 0, 'variance': 0.0, 'geometric_mean': 0.0, 'kurtosis': 0.0}}
>>> my_worker()
>>> my_worker()
>>> {'test': {'arithmetic_mean': 0.5028266906738281, 'kind': 'histogram', 'skewness': 0.0, 'harmonic_mean': 0.2534044030939462, 'min': 0.14868521690368652, 'standard_deviation': 0.50083167520453, 'median': 0.5028266906738281, 'histogram': [(1.1486852169036865, 2), (2.1486852169036865, 0)], 'percentile': [(50, 0.14868521690368652), (75, 0.8569681644439697), (90, 0.8569681644439697), (95, 0.8569681644439697), (99, 0.8569681644439697), (99.9, 0.8569681644439697)], 'n': 2, 'max': 0.8569681644439697, 'variance': 0.2508323668881758, 'geometric_mean': 0.35695727672917066, 'kurtosis': -2.75}}
>>> reporter.remove('5680173c-0279-46ec-bd88-b318f8058ef4')
<Timer(Thread-1, started daemon 4555313152)>
Decorators
**********
The ``metrics`` module also provides a couple of decorators, ``with_histogram`` and ``with_meter``, which are
an easy and fast way to use ``AppMetrics``: just decorate your functions/methods and you will have metrics
collected for them. You can decorate multiple functions with the same metric name, as long as the decorator's
type and parameters are the same, or a ``DuplicateMetricError`` will be raised.
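As a minimal sketch, two functions sharing the same histogram (all names here are illustrative)::
    >>> @metrics.with_histogram("db_time")
    ... def read_record():
    ...     pass
    ...
    >>> @metrics.with_histogram("db_time")
    ... def write_record():
    ...     pass
    ...
Both functions now feed the single ``"db_time"`` metric, since the decorator's type and parameters match.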
See the documentation for `Histograms`_ and `Meters`_ for more details.
API
---
``AppMetrics`` exposes a simple and consistent API; all the metric objects have three methods:
* ``notify(value)`` - add a new value to the metric
* ``get()`` - get the computed metric's value (if any)
* ``raw_data()`` - get the raw data stored in the metrics
However, the ``notify`` input type and the ``get()`` and ``raw_data()`` output formats depend on the kind
of metric chosen. Please notice that ``get()`` returns a dictionary with the mandatory
field ``kind``, whose value depends on the metric's type.
Metrics
-------
Several metric types are available:
Counters
********
Counter metrics provide increment and decrement capabilities for a single integer value.
The ``notify`` method accepts an integer: the counter will be incremented or decremented according
to the value's sign. Notice that the method tries to cast the input value to an integer, so
a ``TypeError`` or a ``ValueError`` may be raised::
>>> counter = metrics.new_counter("test")
>>> counter.notify(10)
>>> counter.notify(-5)
>>> counter.get()
{'kind': 'counter', 'value': 5}
>>> counter.notify("wrong")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "appmetrics/simple_metrics.py", line 40, in notify
value = int(value)
ValueError: invalid literal for int() with base 10: 'wrong'
Gauges
******
Gauges are point-in-time single value metrics. The ``notify`` method accepts any data type::
>>> gauge = metrics.new_gauge("gauge_test")
>>> gauge.notify("version 1.0")
>>> gauge.get()
{'kind': 'gauge', 'value': 'version 1.0'}
The ``gauge`` metric is useful to expose almost-static values such as configuration parameters, constants and so on.
Although you can use any python data type as the value, you won't be able to use the ``wsgi`` middleware unless
you use a valid ``json`` type.
Histograms
**********
Histograms are collections of values on which statistical analyses are performed automatically. They are useful
for knowing how the application is performing. The ``notify`` method accepts a single floating-point value, while
the ``get`` method computes and returns the following values:
* arithmetic mean
* geometric mean
* harmonic mean
* data distribution histogram with automatic bins
* kurtosis
* maximum value
* median
* minimum value
* number of values
* 50, 75, 90, 95, 99 and 99.9th percentiles of the data distribution
* skewness
* standard deviation
* variance
Notice that the ``notify`` method tries to cast the input value to a float, so a ``TypeError`` or a ``ValueError`` may
be raised.
You can also use the histogram metric via the ``with_histogram`` decorator: the time spent in the decorated
function will be collected by a ``histogram`` with the given name::
>>> @metrics.with_histogram("histogram_test")
... def fun(v):
... return v*2
...
>>> fun(10)
20
>>> metrics.metric("histogram_test").raw_data()
[5.9604644775390625e-06]
The full signature is::
with_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs)
where:
* name is the metric's name
* reservoir_type is a string which identifies a ``reservoir`` class, see reservoirs documentation
* reservoir_args and reservoir_kwargs are passed to the chosen reservoir's \_\_init\_\_
Sample types
^^^^^^^^^^^^
To avoid unbound memory usage, the histogram metrics are generated from a *reservoir* of values.
Uniform reservoir
.................
The default *reservoir* type is the *uniform* one, in which a fixed number of values (default 1028)
is kept, and when the reservoir is full new values replace older ones randomly with a uniform
probability distribution, ensuring that the sample is always statistically representative.
This kind of reservoir must be used when you are interested in statistics over the whole stream of
observations. Use ``"uniform"`` as ``reservoir_type`` in ``with_histogram``.
Sliding window reservoir
........................
This *reservoir* keeps a fixed number of observations (default 1028); when a new value comes in, the oldest
one is discarded. The statistics are representative of the last N observations. Its ``reservoir_type``
is ``sliding_window``.
Sliding time window reservoir
.............................
This *reservoir* keeps observations for a fixed amount of time (default 60 seconds); older values get discarded.
The statistics are representative of the last N seconds, but if you have a lot of readings in N seconds this can
consume a large amount of memory. Its ``reservoir_type`` is ``sliding_time_window``.
Exponentially-decaying reservoir
................................
This *reservoir* keeps a fixed number of values (default 1028), with
`exponential decaying <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>`_ of older values
in order to give greater significance to recent data. The bias towards newer values can be adjusted by
specifying a proper `alpha` value to the reservoir's init (defaults to 0.015).
Its ``reservoir_type`` is ``exp_decaying``.
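For example, here is a sketch of a histogram whose statistics cover only the last 30 seconds of observations,
using the ``with_histogram`` signature shown above (the metric and function names are illustrative)::
    >>> @metrics.with_histogram("recent_work", "sliding_time_window", 30)
    ... def do_work():
    ...     pass
    ...
Here ``30`` is passed through to the sliding time-window reservoir as its window size in seconds.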
Meters
******
Meters are increment-only counters that measure the rate of events (such as ``"http requests"``) over time. This kind of
metric is useful for collecting throughput values (such as ``"requests per second"``), both on average and over different time
intervals::
>>> meter = metrics.new_meter("meter_test")
>>> meter.notify(1)
>>> meter.notify(1)
>>> meter.notify(3)
>>> meter.get()
{'count': 5, 'kind': 'meter', 'five': 0.0066114184713530035, 'mean': 0.27743058841197027, 'fifteen': 0.0022160607980413085, 'day': 2.3147478365093123e-05, 'one': 0.031982234148270686}
The return values of the ``get`` method are the following:
* ``count``: number of operations collected so far
* ``mean``: the average throughput since the metric creation
* ``one``: one-minute
`exponentially-weighted moving average <http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average>`_
(*EWMA*)
* ``five``: five-minutes *EWMA*
* ``fifteen``: fifteen-minutes *EWMA*
* ``day``: last day *EWMA*
* ``kind``: "meter"
Notice that the ``notify`` method tries to cast the input value to an integer, so a ``TypeError`` or a ``ValueError``
may be raised.
You can also use the meter metric via the ``with_meter`` decorator: the number of calls to the decorated
function will be collected by a ``meter`` with the given name.
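A minimal sketch (the metric and function names are illustrative)::
    >>> @metrics.with_meter("requests")
    ... def handle_request():
    ...     pass
    ...
    >>> handle_request()
    >>> metrics.get("requests")["count"]
    1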
Tagging
-------
You can group several metrics together by "tagging" them::
>>> metrics.new_histogram("test1")
<appmetrics.histogram.Histogram object at 0x10ac2a950>
>>> metrics.new_gauge("test2")
<appmetrics.simple_metrics.Gauge object at 0x10ac2a990>
>>> metrics.new_meter("test3")
<appmetrics.meter.Meter object at 0x10ac2a9d0>
>>> metrics.tag("test1", "group1")
>>> metrics.tag("test3", "group1")
>>> metrics.tags()
{'group1': set(['test1', 'test3'])}
>>> metrics.metrics_by_tag("group1")
{'test1': {'arithmetic_mean': 0.0, 'skewness': 0.0, 'harmonic_mean': 0.0, 'min': 0, 'standard_deviation': 0.0, 'median': 0.0, 'histogram': [(0, 0)], 'percentile': [(50, 0.0), (75, 0.0), (90, 0.0), (95, 0.0), (99, 0.0), (99.9, 0.0)], 'n': 0, 'max': 0, 'variance': 0.0, 'geometric_mean': 0.0, 'kurtosis': 0.0}, 'test3': {'count': 0, 'five': 0.0, 'mean': 0.0, 'fifteen': 0.0, 'day': 0.0, 'one': 0.0}}
>>> metrics.untag('test1', 'group1')
True
>>> metrics.untag('test1', 'group1')
False
As you can see above, four functions are available:
* ``metrics.tag(metric_name, tag_name)``: tag the metric named ``<metric_name>`` with ``<tag_name>``.
Raise ``InvalidMetricError`` if ``<metric_name>`` does not exist.
* ``metrics.tags()``: return the currently defined tags.
* ``metrics.metrics_by_tag(tag_name)``: return a dictionary with metric names as keys
and metric values as returned by ``<metric_object>.get()``. Return an empty dictionary if ``tag_name`` does
not exist.
* ``metrics.untag(metric_name, tag_name)``: remove the tag named ``<tag_name>`` from the metric named
``<metric_name>``. Return True if the tag was removed, False if either the metric or the tag did not exist. When a
tag is no longer used, it gets implicitly removed.
External access
---------------
You can access the metrics provided by ``AppMetrics`` externally via the ``WSGI``
middleware found in ``appmetrics.wsgi.AppMetricsMiddleware``. It is a standard ``WSGI``
middleware with only ``werkzeug`` as an external dependency, and it can be plugged into any framework supporting
the ``WSGI`` standard, for example a ``Flask`` application::
from flask import Flask
from appmetrics import metrics
metrics.new_histogram("test-histogram")
metrics.new_gauge("test-counter")
metrics.metric("test-counter").notify(10)
app = Flask(__name__)
@app.route('/hello')
def hello_world():
return 'Hello World!'
if __name__ == '__main__':
from appmetrics.wsgi import AppMetricsMiddleware
app.wsgi_app = AppMetricsMiddleware(app.wsgi_app)
app.run()
If you launch the above application you can ask for metrics::
$ curl http://localhost:5000/hello
Hello World!
$ curl http://localhost:5000/_app-metrics/metrics
["test-counter", "test-histogram"]
$ curl http://localhost:5000/_app-metrics/metrics/test-counter
10
In this way you can easily expose your application's metrics to an external monitoring service.
Moreover, since the ``AppMetricsMiddleware`` exposes a full *RESTful API*, you can create metrics
from anywhere and also populate them with data from other applications.
Usage
*****
As usual, instantiate the middleware with the wrapped ``WSGI`` application; it looks for
request paths starting with ``"/_app-metrics"``: if not found, the wrapped application
is called. The following resources are defined:
``/_app-metrics/metrics``
- **GET**: return the list of the registered metrics
``/_app-metrics/metrics/<name>``
- **GET**: return the value of the given metric or ``404``.
- **PUT**: create a new metric with the given name. The body must be a ``JSON`` object with a
mandatory attribute named ``"type"`` which must be one of the metric types allowed
by the ``"metrics.METRIC_TYPES"`` dictionary, while the other attributes are
passed to the ``new_<type>`` function as keyword arguments.
Request's ``content-type`` must be ``"application/json"``.
- **POST**: add a new value to the metric. The body must be a ``JSON`` object with a mandatory
attribute named ``"value"``: the notify method will be called with the given value.
Other attributes are ignored.
Request's ``content-type`` must be ``"application/json"``.
- **DELETE**: remove the metric with the given name. Return "deleted" or "not deleted".
``/_app-metrics/tags``
- **GET**: return the list of registered tags
``/_app-metrics/tags/<name>``
- **GET**: return the metrics tagged with the given tag. If the value of the ``GET`` parameter ``"expand"``
is ``"true"``, a JSON object is returned, with the name of each tagged metric as keys and corresponding values.
If it is ``"false"`` or not provided, the list of metric names is returned.
Return a ``404`` if the tag does not exist
``/_app-metrics/tags/<tag_name>/<metric_name>``
- **PUT**: tag the metric named ``<metric_name>`` with ``<tag_name>``. Return a ``400`` if the given metric
does not exist.
- **DELETE**: remove the tag ``<tag_name>`` from ``<metric_name>``. Return "deleted" or "not deleted". If
``<tag_name>`` is no longer used, it gets implicitly removed.
The response body is always encoded in JSON, and the ``Content-Type`` is ``application/json``.
The root doesn't have to be ``"/_app-metrics"``; you can customize it by providing your own root to
the middleware constructor.
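As a sketch, creating a new counter over HTTP and then feeding it a value might look like this
(the host, port and metric name are illustrative)::
    $ curl -X PUT -H "Content-Type: application/json" -d '{"type": "counter"}' http://localhost:5000/_app-metrics/metrics/jobs
    $ curl -X POST -H "Content-Type: application/json" -d '{"value": 1}' http://localhost:5000/_app-metrics/metrics/jobs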
A standalone ``AppMetrics`` webapp can be started by using ``werkzeug``'s development server::
$ python -m werkzeug.serving appmetrics.wsgi.standalone_app
* Running on http://127.0.0.1:5000/
The standalone app mounts on the root (no ``_app-metrics`` prefix). DON'T use it for production purposes!!!
Reporting
---------
``AppMetrics`` provides another easy way to get your application's metrics: the ``reporter`` module. It allows
you to register any number of callbacks that will be called at scheduled times with the metrics, letting you
"export" your application's metrics into your favourite storage system.
The main entry point for the ``reporter`` feature is ``reporter.register``::
reporter.register(callback, schedule, tag=None)
where:
* *callback* must be a function that will be called with a dictionary of ``{metric name: metric values}``
* *schedule* must be an iterable object yielding a future timestamp (in ``time.time()`` format) at each iteration
* *tag* must be a tag to narrow the involved metrics to the ones with that tag, if ``None`` all the
available metrics will be used.
When a callback is registered, a new thread will be started, waiting for the next scheduled call. Please notice
that the callback will be executed in that thread. ``register`` returns an opaque id identifying the registration.
A callback registration can be removed by calling ``reporter.remove`` with the id returned by ``register``.
``reporter`` provides a simple scheduler object, ``fixed_interval_scheduler``::
>>> sched = reporter.fixed_interval_scheduler(10)
>>> next(sched)
1397297405.672592
>>> next(sched)
1397297415.672592
>>> next(sched)
1397297425.672592
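Putting it together, a sketch of registering a callback on such a scheduler and later removing it
(the callback body and tag are illustrative)::
    >>> def dump(metrics_data):
    ...     print(metrics_data)
    ...
    >>> rid = reporter.register(dump, reporter.fixed_interval_scheduler(30), tag="group1")
    >>> reporter.remove(rid)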
CSV reporter
************
A simple reporter callback is exposed by ``reporter.CSVReporter``. As the name suggests, it will create
CSV reports with metric values: a file for each metric, a row for each call. See ``examples/csv_reporter.py``.
Testing
-------
``AppMetrics`` has an exhaustive, fully-covering test suite, made up of both doctests and unit tests. To run the
whole test suite (including the coverage test), just issue::
$ nosetests --with-coverage --cover-package=appmetrics --cover-erase
You will need to install a couple of packages in your python environment; the list is in the
``"requirements.txt"`` file.
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/README.rst
|
README.rst
|
import collections
import random
import threading
import abc
import time
import operator
import math
from . import statistics, exceptions, py3comp
DEFAULT_UNIFORM_RESERVOIR_SIZE = 1028
DEFAULT_TIME_WINDOW_SIZE = 60
DEFAULT_EXPONENTIAL_DECAY_FACTOR = 0.015
def search_greater(values, target):
"""
Return the first index in values whose tuple's first item is greater
than or equal to target (values must be sorted by that first item)
"""
first = 0
last = len(values)
while first < last:
middle = (first + last) // 2
if values[middle][0] < target:
first = middle + 1
else:
last = middle
return first
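# Example: search_greater([(1.0, 'a'), (3.0, 'b')], 2.0) returns 1, the
# index of the first pair whose first item is >= 2.0.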
class ReservoirBase(object):
"""
Base class for reservoirs. Subclass and override _do_add, _get_values and
_same_parameters
"""
__metaclass__ = abc.ABCMeta
def add(self, value):
"""
Add a value to the reservoir
The value will be cast to a floating-point, so a TypeError or a
ValueError may be raised.
"""
if not isinstance(value, float):
value = float(value)
return self._do_add(value)
@property
def values(self):
"""
Return the stored values
"""
return self._get_values()
@property
def sorted_values(self):
"""
Sort and return the current sample values
"""
return sorted(self.values)
def same_kind(self, other):
"""
Return True if "other" is an object of the same type and it was
instantiated with the same parameters
"""
return type(self) is type(other) and self._same_parameters(other)
@abc.abstractmethod
def _do_add(self, value):
"""
Add the floating-point value to the reservoir. Override in subclasses
"""
@abc.abstractmethod
def _get_values(self):
"""
Get the current reservoir's content. Override in subclasses
"""
@abc.abstractmethod
def _same_parameters(self, other):
"""
Return True if this object has been instantiated with the same
parameters as "other".
Override in subclasses
"""
class UniformReservoir(ReservoirBase):
"""
A random sampling reservoir of floating-point values. Uses Vitter's
Algorithm R to produce a statistically representative sample
(http://www.cs.umd.edu/~samir/498/vitter.pdf)
"""
def __init__(self, size=DEFAULT_UNIFORM_RESERVOIR_SIZE):
self.size = size
self._values = [0] * size
self.count = 0
self.lock = threading.Lock()
def _do_add(self, value):
changed = False
with self.lock:
if self.count < self.size:
self._values[self.count] = value
changed = True
else:
# not randint() because it yields different values on
# python 3; it would be a nightmare to test.
k = int(random.uniform(0, self.count))
if k < self.size:
self._values[k] = value
changed = True
self.count += 1
return changed
def _get_values(self):
return self._values[:min(self.count, self.size)]
def _same_parameters(self, other):
return self.size == other.size
def __repr__(self):
return "{}({})".format(type(self).__name__, self.size)
class SlidingWindowReservoir(ReservoirBase):
"""
A simple sliding-window reservoir that keeps the last N values
"""
def __init__(self, size=DEFAULT_UNIFORM_RESERVOIR_SIZE):
self.size = size
self.deque = collections.deque(maxlen=self.size)
def _do_add(self, value):
# No need for explicit lock - deques should be thread-safe:
# http://docs.python.org/2/library/collections.html#collections.deque
self.deque.append(value)
def _get_values(self):
return list(self.deque)
def _same_parameters(self, other):
return self.size == other.size
def __repr__(self):
return "{}({})".format(type(self).__name__, self.size)
class SlidingTimeWindowReservoir(ReservoirBase):
"""
A time-sliced reservoir that keeps the values added in the last N seconds
"""
def __init__(self, window_size=DEFAULT_TIME_WINDOW_SIZE):
"""
Build a new sliding time-window reservoir
window_size is the time window size in seconds
"""
self.window_size = window_size
self.lock = threading.Lock()
self.key = operator.itemgetter(0)
self._values = []
def _do_add(self, value):
now = time.time()
with self.lock:
self.tick(now)
self._values.append((now, value))
def tick(self, now):
target = now - self.window_size
# the values are sorted by the first element (timestamp), so let's
# perform a dichotomic search
idx = search_greater(self._values, target)
# older values found, discard them
if idx:
self._values = self._values[idx:]
def _get_values(self):
now = time.time()
with self.lock:
self.tick(now)
return [y for x, y in self._values]
def _same_parameters(self, other):
return self.window_size == other.window_size
def __repr__(self):
return "{}({})".format(type(self).__name__, self.window_size)
class ExponentialDecayingReservoir(ReservoirBase):
"""
An exponential-weighted reservoir which exponentially decays older values
in order to give greater significance to newer ones.
See http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf
"""
# TODO: replace the sort()s with a proper data structure (btree/skiplist).
# However, since the list is kept sorted (and it should be very small),
# the sort() shouldn't dramatically slow down the insertions, especially
# considering that the search can then be O(log n)
RESCALE_THRESHOLD = 3600
EPSILON = 1e-12
def __init__(self, size=DEFAULT_UNIFORM_RESERVOIR_SIZE,
alpha=DEFAULT_EXPONENTIAL_DECAY_FACTOR):
self.size = size
self.alpha = alpha
self.start_time = time.time()
self.lock = threading.Lock()
self.count = 0
self.next_scale_time = self.start_time + self.RESCALE_THRESHOLD
self.key = operator.itemgetter(0)
self._values = []
def _lookup(self, timestamp):
"""
Return the index of the value associated with "timestamp" if any, else
None. Since the timestamps are floating-point values, they are
considered equal if their absolute difference is smaller than
self.EPSILON
"""
idx = search_greater(self._values, timestamp)
if (idx < len(self._values)
and math.fabs(self._values[idx][0] - timestamp) < self.EPSILON):
return idx
return None
def _put(self, timestamp, value):
"""Replace the value associated with "timestamp" or add the new value"""
idx = self._lookup(timestamp)
if idx is not None:
self._values[idx] = (timestamp, value)
else:
self._values.append((timestamp, value))
def _do_add(self, value):
now = time.time()
self.rescale(now)
rnd = random.random()
weighted_time = self.weight(now - self.start_time) / rnd
changed = False
with self.lock:
if self.count < self.size:
self._put(weighted_time, value)
self._values.sort(key=self.key)
changed = True
else:
first = self._values[0][0]
if first < weighted_time:
idx = self._lookup(weighted_time)
if idx is None:
self._values[0] = (weighted_time, value)
self._values.sort(key=self.key)
changed = True
self.count += 1
return changed
def weight(self, t):
return math.exp(self.alpha * t)
def rescale(self, now):
with self.lock:
if now > self.next_scale_time:
original_values = self._values[:]
self._values = []
for i, (k, v) in enumerate(original_values):
k *= math.exp(-self.alpha * (now - self.start_time))
self._put(k, v)
self.count = len(self._values)
self.start_time = now
self.next_scale_time = self.start_time + self.RESCALE_THRESHOLD
def _get_values(self):
return [y for x, y in self._values[:max(self.count, self.size)]]
def _same_parameters(self, other):
return self.size == other.size and self.alpha == other.alpha
def __repr__(self):
return "{}({}, {})".format(type(self).__name__, self.size, self.alpha)
class Histogram(object):
"""A metric which calculates some statistics over the distribution of some
values"""
def __init__(self, reservoir):
self.reservoir = reservoir
def notify(self, value):
"""Add a new value to the metric"""
return self.reservoir.add(value)
def raw_data(self):
"""Return the raw underlying data"""
return self.reservoir.values
def get(self):
"""Return the computed statistics over the gathered data"""
values = self.reservoir.sorted_values
def safe(f, *args):
try:
return f(values, *args)
except exceptions.StatisticsError:
return 0.0
plevels = [50, 75, 90, 95, 99, 99.9]
percentiles = [safe(statistics.percentile, p) for p in plevels]
try:
histogram = statistics.get_histogram(values)
except exceptions.StatisticsError:
histogram = [(0, 0)]
res = dict(
kind="histogram",
min=values[0] if values else 0,
max=values[-1] if values else 0,
arithmetic_mean=safe(statistics.mean),
geometric_mean=safe(statistics.geometric_mean),
harmonic_mean=safe(statistics.harmonic_mean),
median=safe(statistics.median),
variance=safe(statistics.variance),
standard_deviation=safe(statistics.stdev),
skewness=safe(statistics.skewness),
kurtosis=safe(statistics.kurtosis),
percentile=py3comp.zip(plevels, percentiles),
histogram=histogram,
n=len(values))
return res
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/appmetrics/histogram.py
|
histogram.py
|
import logging, logging.config
import json
import werkzeug
from . import metrics, exceptions, py3comp
log = logging.getLogger("appmetrics.wsgi")
class AppMetricsMiddleware(object):
"""
WSGI middleware for AppMetrics
Usage:
Instantiate me with the wrapped WSGI application. This middleware looks for request paths starting
with "/_app-metrics": if not found, the wrapped application is called. The following resources are defined:
``/_app-metrics/metrics``
- **GET**: return the list of the registered metrics
``/_app-metrics/metrics/<name>``
- **GET**: return the value of the given metric or ``404``.
- **PUT**: create a new metric with the given name. The body must be a ``JSON`` object with a
mandatory attribute named ``"type"`` which must be one of the metric types allowed
by the ``"metrics.METRIC_TYPES"`` dictionary, while the other attributes are
passed to the ``new_<type>`` function as keyword arguments.
Request's ``content-type`` must be ``"application/json"``.
- **POST**: add a new value to the metric. The body must be a ``JSON`` object with a mandatory
attribute named ``"value"``: the notify method will be called with the given value.
Other attributes are ignored.
Request's ``content-type`` must be ``"application/json"``.
- **DELETE**: remove the metric with the given name. Return "deleted" or "not deleted".
``/_app-metrics/tags``
- **GET**: return the list of registered tags
``/_app-metrics/tags/<name>``
- **GET**: return the metrics tagged with the given tag. If the value of the ``GET`` parameter ``"expand"``
is ``"true"``, a JSON object is returned, with the name of each tagged metric as keys and corresponding values.
If it is ``"false"`` or not provided, the list of metric names is returned.
Return a ``404`` if the tag does not exist
``/_app-metrics/tags/<tag_name>/<metric_name>``
- **PUT**: tag the metric named ``<metric_name>`` with ``<tag_name>``. Return a ``400`` if the given metric
does not exist.
- **DELETE**: remove the tag ``<tag_name>`` from ``<metric_name>``. Return "deleted" or "not deleted". If
``<tag_name>`` is no longer used, it gets implicitly removed.
The root can be different from "/_app-metrics"; you can set it in the middleware constructor.
"""
def __init__(self, app, root="_app-metrics", extra_headers=None, mimetype="application/json"):
"""
parameters:
- app: wrapped WSGI application
- root: path root to look for
- extra_headers: extra headers that will be appended to the response headers
"""
self.app = app
self.root = "/" + root.strip("/").strip()
self.extra_headers = extra_headers or {}
self.mimetype = mimetype
self.url_map = werkzeug.routing.Map([
werkzeug.routing.Submount(self.root, [
werkzeug.routing.Rule("/metrics", endpoint=handle_metrics_list, methods=['GET']),
werkzeug.routing.Rule("/metrics/<name>", endpoint=handle_metric_show, methods=['GET']),
werkzeug.routing.Rule("/metrics/<name>", endpoint=handle_metric_new, methods=['PUT']),
werkzeug.routing.Rule("/metrics/<name>", endpoint=handle_metric_update, methods=['POST']),
werkzeug.routing.Rule("/metrics/<name>", endpoint=handle_metric_delete, methods=['DELETE']),
werkzeug.routing.Rule("/tags", endpoint=handle_tags_list, methods=['GET']),
werkzeug.routing.Rule("/tags/<tag_name>", endpoint=handle_tag_show, methods=['GET']),
werkzeug.routing.Rule("/tags/<tag_name>/<metric_name>", endpoint=handle_tag_add, methods=['PUT']),
werkzeug.routing.Rule("/tags/<tag_name>/<metric_name>", endpoint=handle_untag, methods=['DELETE']),
])
])
def get_response(self, body, code, headers=None):
if headers is None:
headers = []
headers = dict(headers).copy()
headers.update(self.extra_headers)
return werkzeug.wrappers.Response(body, code, headers.items(), self.mimetype)
def jsonize_error(self, exception, environ):
return self.get_response(json.dumps(exception.description), exception.code, exception.get_headers(environ))
def __call__(self, environ, start_response):
"""WSGI application interface"""
urls = self.url_map.bind_to_environ(environ)
try:
endpoint, args = urls.match()
except werkzeug.exceptions.NotFound:
# the request did not match, go on with wsgi stack
return self.app(environ, start_response)
except werkzeug.exceptions.HTTPException as e:
response = self.jsonize_error(e, environ)
else:
request = werkzeug.wrappers.Request(environ, populate_request=False)
try:
body = endpoint(request, **args)
response = self.get_response(body, 200)
except werkzeug.exceptions.HTTPException as e:
response = self.jsonize_error(e, environ)
except Exception as e:
log.debug("Unhandled exception: %s", e, exc_info=True)
response = self.get_response("Internal Server Error", 500)
return response(environ, start_response)
def get_body(request):
# get content type
ctype = request.mimetype
if not ctype or ctype != "application/json":
raise werkzeug.exceptions.UnsupportedMediaType()
# get content data
try:
return py3comp.json_load(request.stream, request.charset)
except ValueError as e:
log.debug("Invalid body: %s", e)
raise werkzeug.exceptions.BadRequest(description="invalid json")
def handle_metrics_list(request):
return json.dumps(metrics.metrics())
def handle_metric_show(request, name):
try:
metric = metrics.metric(name)
except KeyError:
raise werkzeug.exceptions.NotFound("No such metric: {!r}".format(name))
return json.dumps(metric.get())
def handle_metric_delete(request, name):
res = metrics.delete_metric(name)
return "deleted" if res else "not deleted"
def handle_metric_new(request, name):
data = get_body(request)
type_ = data.pop('type', None)
if not type_:
raise werkzeug.exceptions.BadRequest(description="metric type not provided")
metric_type = metrics.METRIC_TYPES.get(type_)
if not metric_type:
raise werkzeug.exceptions.BadRequest("invalid metric type: {!r}".format(type_))
try:
metric_type(name, **data)
except exceptions.AppMetricsError as e:
raise werkzeug.exceptions.BadRequest("can't create metric {}({!r}): {}".format(type_, name, e))
except Exception as e:
log.debug(str(e), exc_info=True)
raise werkzeug.exceptions.BadRequest("can't create metric {}({!r})".format(type_, name))
return ""
def handle_metric_update(request, name):
data = get_body(request)
value = data.pop('value', None)
if value is None:
raise werkzeug.exceptions.BadRequest("metric value not provided")
try:
metric = metrics.metric(name)
except KeyError:
raise werkzeug.exceptions.NotFound()
metric.notify(value)
return ""
def handle_tags_list(request):
return json.dumps(sorted(metrics.tags().keys()))
def handle_tag_show(request, tag_name):
all_tags = metrics.tags()
if tag_name not in all_tags:
raise werkzeug.exceptions.NotFound(description="no such tag: {!r}".format(tag_name))
if request.args.get('expand', 'false') == 'true':
return json.dumps(metrics.metrics_by_tag(tag_name))
else:
return json.dumps(sorted(all_tags[tag_name]))
def handle_tag_add(request, tag_name, metric_name):
try:
metrics.tag(metric_name, tag_name)
except metrics.InvalidMetricError as e:
raise werkzeug.exceptions.BadRequest(description=str(e))
return ""
def handle_untag(request, tag_name, metric_name):
res = metrics.untag(metric_name, tag_name)
return "deleted" if res else "not deleted"
# useful to run standalone with werkzeug's server:
# $ python -m werkzeug.serving appmetrics.wsgi.standalone_app
# * Running on http://127.0.0.1:5000/
standalone_app = AppMetricsMiddleware(werkzeug.exceptions.NotFound(), "")
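# Hedged usage sketch: wrap your own WSGI callable instead of the 404
# application used above ("my_wsgi_app" is illustrative; the second
# argument mirrors the standalone setup):
#   app = AppMetricsMiddleware(my_wsgi_app, "")
# Paths matched by url_map are answered by the middleware, anything else
# falls through to the wrapped application.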
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/appmetrics/wsgi.py
|
wsgi.py
|
from contextlib import contextmanager
import functools
import threading
import time
from .exceptions import DuplicateMetricError, InvalidMetricError
from . import histogram, simple_metrics, meter, py3comp
REGISTRY = {}
TAGS = {}
LOCK = threading.Lock()
def new_metric(name, class_, *args, **kwargs):
"""Create a new metric of the given class.
Raise DuplicateMetricError if the given name has already been registered
Internal function - use "new_<type>" instead
"""
with LOCK:
try:
item = REGISTRY[name]
except KeyError:
item = REGISTRY[name] = class_(*args, **kwargs)
return item
raise DuplicateMetricError("Metric {} already exists of type {}".format(name, type(item).__name__))
def delete_metric(name):
"""Remove the named metric"""
with LOCK:
old_metric = REGISTRY.pop(name, None)
# look for the metric name in the tags and remove it
for _, tags in py3comp.iteritems(TAGS):
if name in tags:
tags.remove(name)
return old_metric
def metric(name):
"""
Return the metric with the given name, if any
Raise InvalidMetricError if the given name has not been registered
"""
try:
return REGISTRY[name]
except KeyError as e:
raise InvalidMetricError("Metric {} not found!".format(e))
def metrics():
"""
Return the sorted list of registered metric names
"""
return sorted(REGISTRY.keys())
def get(name):
"""
Call "get" on the metric with the given name
Raise InvalidMetricError if the given name has not been registered
"""
return metric(name).get()
def notify(name, value):
"""
Call "notify" on the metric with the given name
Raise InvalidMetricError if the given name has not been registered
"""
return metric(name).notify(value)
def new_histogram(name, reservoir=None):
"""
Build a new histogram metric with a given reservoir object
If the reservoir is not provided, a uniform reservoir with the default size is used
"""
if reservoir is None:
reservoir = histogram.UniformReservoir(histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE)
return new_metric(name, histogram.Histogram, reservoir)
def new_counter(name):
"""
Build a new "counter" metric
"""
return new_metric(name, simple_metrics.Counter)
def new_gauge(name):
"""
Build a new "gauge" metric
"""
return new_metric(name, simple_metrics.Gauge)
def new_meter(name, tick_interval=5):
"""
Build a new "meter" metric
"""
return new_metric(name, meter.Meter, tick_interval)
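# Hedged usage sketch for the factories above (metric names are
# illustrative):
#   counter = new_counter("requests")
#   counter.notify(1)
#   hist = new_histogram("latency")
#   hist.notify(0.123)
#   get("latency")   # -> dict of computed statistics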
def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
"""
Build a new histogram metric and a reservoir from the given parameters
"""
reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs)
return new_histogram(name, reservoir)
def new_reservoir(reservoir_type='uniform', *reservoir_args, **reservoir_kwargs):
"""
Build a new reservoir
"""
try:
reservoir_cls = RESERVOIR_TYPES[reservoir_type]
except KeyError:
raise InvalidMetricError("Unknown reservoir type: {}".format(reservoir_type))
return reservoir_cls(*reservoir_args, **reservoir_kwargs)
def get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs):
"""
Will return a histogram matching the given parameters or raise
DuplicateMetricError if it can't be created due to a name collision
with another histogram with different parameters.
"""
reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs)
try:
hmetric = new_histogram(name, reservoir)
except DuplicateMetricError:
hmetric = metric(name)
if not isinstance(hmetric, histogram.Histogram):
raise DuplicateMetricError(
"Metric {!r} already exists of type {!r}".format(name, type(hmetric).__name__))
if not hmetric.reservoir.same_kind(reservoir):
raise DuplicateMetricError(
"Metric {!r} already exists with a different reservoir: {}".format(name, hmetric.reservoir))
return hmetric
def with_histogram(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs):
"""
Time-measuring decorator: the time spent in the wrapped function is measured
and added to the named metric.
reservoir_type, reservoir_args and reservoir_kwargs are passed to get_or_create_histogram()
"""
hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs)
def wrapper(f):
@functools.wraps(f)
def fun(*args, **kwargs):
t1 = time.time()
res = f(*args, **kwargs)
t2 = time.time()
hmetric.notify(t2-t1)
return res
return fun
return wrapper
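# Hedged usage sketch ("db_query" is an illustrative name; each call adds
# its elapsed wall-clock time to the histogram):
#   @with_histogram("db_query")
#   def run_query():
#       ...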
def with_meter(name, tick_interval=meter.DEFAULT_TICK_INTERVAL):
"""
Call-counting decorator: each time the wrapped function is called
the named meter is incremented by one.
tick_interval is passed to new_meter()
"""
try:
mmetric = new_meter(name, tick_interval)
except DuplicateMetricError as e:
mmetric = metric(name)
if not isinstance(mmetric, meter.Meter):
raise DuplicateMetricError("Metric {!r} already exists of type {}".format(name, type(mmetric).__name__))
if tick_interval != mmetric.tick_interval:
raise DuplicateMetricError("Metric {!r} already exists: {}".format(name, mmetric))
def wrapper(f):
@functools.wraps(f)
def fun(*args, **kwargs):
res = f(*args, **kwargs)
mmetric.notify(1)
return res
return fun
return wrapper
@contextmanager
def timer(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs):
"""
Time-measuring context manager: the time spent in the wrapped block
is measured and added to the named metric.
"""
hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs)
t1 = time.time()
yield
t2 = time.time()
hmetric.notify(t2 - t1)
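# Hedged usage sketch:
#   with timer("render_time"):
#       render_page()   # illustrative workload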
def tag(name, tag_name):
"""
Tag the named metric with the given tag.
"""
with LOCK:
# just to check if <name> exists
metric(name)
TAGS.setdefault(tag_name, set()).add(name)
def tags():
"""
Return the currently defined tags.
"""
# protect global value against accidental modifications
return TAGS.copy()
def metrics_by_tag(tag_name):
"""
Return a dictionary with {metric name: metric values} for all the metrics with the given tag.
Return an empty dictionary if the given tag does not exist.
"""
try:
names = TAGS[tag_name]
except KeyError:
return {}
return metrics_by_name_list(names)
def untag(name, tag_name):
"""
Remove the given tag from the given metric.
Return True if the metric was tagged, False otherwise
"""
with LOCK:
by_tag = TAGS.get(tag_name, None)
if not by_tag:
return False
try:
by_tag.remove(name)
# remove the tag if no associations left
if not by_tag:
TAGS.pop(tag_name)
return True
except KeyError:
return False
def metrics_by_name_list(names):
"""
Return a dictionary with {metric name: metric value} for all the metrics with the given names.
"""
results = {}
for name in names:
# no lock - a metric could have been removed in the meanwhile
try:
results[name] = get(name)
except InvalidMetricError:
continue
return results
RESERVOIR_TYPES = {
'uniform': histogram.UniformReservoir,
'sliding_window': histogram.SlidingWindowReservoir,
'sliding_time_window': histogram.SlidingTimeWindowReservoir,
'exp_decaying': histogram.ExponentialDecayingReservoir,
}
METRIC_TYPES = {
'histogram': new_histogram_with_implicit_reservoir,
'gauge': new_gauge,
'counter': new_counter,
'meter': new_meter,
}
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/appmetrics/metrics.py
|
metrics.py
|
import os
import csv
import logging
import uuid
import threading
import time
import atexit
from . import metrics, py3comp
log = logging.getLogger('appmetrics.reporter')
REGISTRY = {}
LOCK = threading.Lock()
def register(callback, schedule, tag=None):
"""
Register a callback which will be called at scheduled intervals with
the metrics that have the given tag (or all the metrics if None).
Return an identifier which can be used to access the registered callback later.
"""
try:
iter(schedule)
except TypeError:
raise TypeError("{} is not iterable".format(schedule))
if not callable(callback):
raise TypeError("{} is not callable".format(callback))
thread = Timer(schedule, callback, tag)
id_ = str(uuid.uuid4())
with LOCK:
REGISTRY[id_] = thread
thread.start()
return id_
def get(id_):
"""
Return the registered callback with the given id, or None if it does not exist
"""
return REGISTRY.get(id_)
def remove(id_):
"""
Remove the callback and its schedule
"""
with LOCK:
thread = REGISTRY.pop(id_, None)
if thread is not None:
thread.cancel()
return thread
class Timer(threading.Thread):
"""
Encapsulate a callback and its parameters
"""
def __init__(self, schedule, callback, tag=None):
"""
"schedule" must be an iterator yielding the next "tick" at each iteration
"""
super(Timer, self).__init__()
self.schedule = schedule
self.callback = callback
self.tag = tag
self._event = threading.Event()
self.daemon = True
def run(self):
while not self._event.is_set():
now = time.time()
# skip already passed ticks
for next_time in self.schedule:
if next_time > now:
break
# the iterator was consumed, exit
else:
break
# sleep for the remaining time
time.sleep(next_time - now)
# the event may have been set while sleeping
if not self._event.is_set():
data = get_metrics(self.tag)
if not data:
log.debug("No metrics found for tag: {}".format(self.tag))
else:
# call the function, finally
self.callback(data)
# set the event, for consistency
self.cancel()
def cancel(self):
"""
Cancel the timer, no more actions will be performed
"""
self._event.set()
@property
def is_running(self):
return not self._event.is_set()
def get_metrics(tag):
"""
Return the values for the metrics with the given tag or all the available metrics if None
"""
if tag is None:
return metrics.metrics_by_name_list(metrics.metrics())
else:
return metrics.metrics_by_tag(tag)
def fixed_interval_scheduler(interval):
"""
A scheduler that ticks at fixed intervals of "interval" seconds
"""
start = time.time()
next_tick = start
while True:
next_tick += interval
yield next_tick
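# Hedged usage sketch: dump every metric to CSV files every 30 seconds
# through the CSVReporter defined below (the directory is illustrative):
#   register(CSVReporter("/tmp/metrics"), fixed_interval_scheduler(30))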
class CSVReporter(object):
histogram_header = (
'time', 'n', 'min', 'max', 'arithmetic_mean', 'median', 'harmonic_mean', 'geometric_mean',
'standard_deviation', 'variance', 'percentile_50', 'percentile_75', 'percentile_90',
'percentile_95', 'percentile_99', 'percentile_99.9', 'kurtosis', 'skewness')
meter_header = ('time', 'count', 'mean', 'one', 'five', 'fifteen', 'day')
def __init__(self, directory):
self.directory = directory
def file_name(self, name, kind):
file_name = os.path.join(self.directory, "{}_{}.csv".format(name, kind))
if not os.path.exists(file_name):
new = True
else:
new = False
return file_name, new
def dump_histogram(self, name, obj):
# histogram doesn't fit into a tabular format
obj.pop('histogram')
# we already know its kind
kind = obj.pop('kind')
# flatten percentiles
percentiles = obj.pop('percentile')
for k, v in percentiles:
obj['percentile_{}'.format(k)] = v
# add the current time
obj['time'] = time.time()
file_name, new = self.file_name(name, kind)
with open(file_name, "a" if py3comp.PY3 else "ab") as of:
writer = csv.DictWriter(of, self.histogram_header)
# if the file is new, write the header once
if new:
writer.writerow(dict(zip(self.histogram_header, self.histogram_header)))
writer.writerow(obj)
return file_name
def dump_meter(self, name, obj):
# we already know its kind
kind = obj.pop('kind')
# add the current time
obj['time'] = time.time()
file_name, new = self.file_name(name, kind)
with open(file_name, "a" if py3comp.PY3 else "ab") as of:
writer = csv.DictWriter(of, self.meter_header)
# if the file is new, write the header once
if new:
writer.writerow(dict(zip(self.meter_header, self.meter_header)))
writer.writerow(obj)
return file_name
def __call__(self, objects):
for name, obj in py3comp.iteritems(objects):
fun = getattr(self, "dump_%s" % obj.get('kind', "unknown"), None)
if fun:
# protect the original object
fun(name, obj.copy())
@atexit.register
def cleanup():
for v in REGISTRY.values():
v.cancel()
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/appmetrics/reporter.py
|
reporter.py
|
import math
import time
import threading
DEFAULT_TICK_INTERVAL = 5
class EWMA(object):
"""
Compute exponential-weighted moving average of values incoming at a fixed rate.
http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
"""
def __init__(self, time_period, tick_interval):
"""
time_period is the period of time over which the moving average is
computed, expressed in minutes
"""
self.time_period = time_period
self.tick_interval = tick_interval
self.rate = 0.0
self.value = 0
self.initialized = False
self.alpha = self.compute_alpha(time_period, tick_interval)
self.lock = threading.Lock()
@staticmethod
def compute_alpha(period, interval):
"""Compute exponential smoothing factor"""
return 1 - math.exp(-interval / (60.0 * period))
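# e.g. with the defaults (one-minute period, 5-second ticks):
#   compute_alpha(1, 5) == 1 - exp(-5 / 60.0) ~= 0.080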
def update(self, value):
"""
Update the current rate with the given value.
The value must be an integer.
"""
value = int(value)
with self.lock:
self.value += value
def tick(self):
"""Decay the current rate according to the elapsed time"""
instant_rate = float(self.value) / float(self.tick_interval)
with self.lock:
if self.initialized:
self.rate += (self.alpha * (instant_rate - self.rate))
else:
self.initialized = True
self.rate = instant_rate
self.value = 0
class Meter(object):
"""
A meter metric which measures mean throughput and one, five, and fifteen-minute
exponentially-weighted moving average throughputs.
This is very similar to the unix "load average" metric. All the throughput
values are expressed in operations per second.
"""
def __init__(self, tick_interval=DEFAULT_TICK_INTERVAL):
self.tick_interval = tick_interval
# one minute
self.m1 = EWMA(1, tick_interval)
# five minutes
self.m5 = EWMA(5, tick_interval)
# fifteen minutes
self.m15 = EWMA(15, tick_interval)
# one day
self.day = EWMA(60 * 24, tick_interval)
self.started_on = self.latest_tick = time.time()
self.count = 0
self.lock = threading.Lock()
def notify(self, value):
"""Add a new observation to the metric"""
with self.lock:
#TODO: this could slow down slow-rate incoming updates
# since the number of ticks depends on the actual time
# passed since the latest notification. Consider using
# a real timer to tick the EWMA.
self.tick()
for avg in (self.m1, self.m5, self.m15, self.day):
avg.update(value)
self.count += value
def tick_all(self, times):
"""
Tick all the EWMAs for the given number of times
"""
for i in range(times):
for avg in (self.m1, self.m5, self.m15, self.day):
avg.tick()
def tick(self):
"""
Emulate a timer: in order to avoid a real timer we "tick" a number
of times depending on the actual time passed since the last tick
"""
now = time.time()
elapsed = now - self.latest_tick
if elapsed > self.tick_interval:
ticks = int(elapsed / self.tick_interval)
self.tick_all(ticks)
self.latest_tick = now
def raw_data(self):
"""Return the raw underlying data"""
return self.count
def get(self):
"""
Return the computed statistics over the gathered data
"""
with self.lock:
self.tick()
data = dict(
kind="meter",
count=self.count,
mean=self.count / (time.time() - self.started_on),
one=self.m1.rate,
five=self.m5.rate,
fifteen=self.m15.rate,
day=self.day.rate)
return data
def __repr__(self):
return "{}({})".format(type(self).__name__, self.tick_interval)
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/appmetrics/meter.py
|
meter.py
|
from __future__ import division
import collections
import math
import operator
import functools
from fractions import Fraction
from decimal import Decimal
from .exceptions import StatisticsError
from .py3comp import xrange, iteritems
def isfinite(n):
"""Return True if x is neither an infinity nor a NaN, and False otherwise.
(Note that 0.0 is considered finite.)
Backported from python 3
"""
return not (math.isinf(n) or math.isnan(n))
def sum(data, start=0):
"""sum(data [, start]) -> value
Return a high-precision sum of the given numeric data. If optional
argument ``start`` is given, it is added to the total. If ``data`` is
empty, ``start`` (defaulting to 0) is returned.
"""
n, d = exact_ratio(start)
T = type(start)
partials = {d: n} # map {denominator: sum of numerators}
# Micro-optimizations.
coerce_types_ = coerce_types
exact_ratio_ = exact_ratio
partials_get = partials.get
# Add numerators for each denominator, and track the "current" type.
for x in data:
T = coerce_types_(T, type(x))
n, d = exact_ratio_(x)
partials[d] = partials_get(d, 0) + n
if None in partials:
assert issubclass(T, (float, Decimal))
assert not isfinite(partials[None])
return T(partials[None])
total = Fraction()
for d, n in sorted(partials.items()):
total += Fraction(n, d)
if issubclass(T, int):
assert total.denominator == 1
return T(total.numerator)
if issubclass(T, Decimal):
return T(total.numerator) / total.denominator
return T(total)
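# e.g. the builtin sum([0.1] * 10) gives 0.9999999999999999, while this
# exact-ratio version returns 1.0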
def exact_ratio(x):
"""Convert Real number x exactly to (numerator, denominator) pair.
x is expected to be an int, Fraction, Decimal or float.
"""
try:
try:
# int, Fraction
return x.numerator, x.denominator
except AttributeError:
# float
try:
return x.as_integer_ratio()
except AttributeError:
# Decimal
try:
return decimal_to_ratio(x)
except AttributeError:
msg = "can't convert type '{}' to numerator/denominator"
raise TypeError(msg.format(type(x).__name__))
except (OverflowError, ValueError):
# INF or NAN
return (x, None)
# FIXME This is faster than Fraction.from_decimal, but still too slow.
def decimal_to_ratio(d):
"""Convert Decimal d to exact integer ratio (numerator, denominator).
"""
sign, digits, exp = d.as_tuple()
if exp in ('F', 'n', 'N'): # INF, NAN, sNAN
assert not d.is_finite()
raise ValueError
num = 0
for digit in digits:
num = num * 10 + digit
if sign:
num = -num
den = 10 ** -exp
return (num, den)
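# e.g. decimal_to_ratio(Decimal("3.14")) == (314, 100)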
def coerce_types(T1, T2):
"""Coerce types T1 and T2 to a common type.
Coercion is performed according to this table, where "N/A" means
that a TypeError exception is raised.
+----------+-----------+-----------+-----------+----------+
| | int | Fraction | Decimal | float |
+----------+-----------+-----------+-----------+----------+
| int | int | Fraction | Decimal | float |
| Fraction | Fraction | Fraction | N/A | float |
| Decimal | Decimal | N/A | Decimal | float |
| float | float | float | float | float |
+----------+-----------+-----------+-----------+----------+
Subclasses trump their parent class; two subclasses of the same
base class will be coerced to the second of the two.
"""
# Get the common/fast cases out of the way first.
if T1 is T2: return T1
if T1 is int: return T2
if T2 is int: return T1
# Subclasses trump their parent class.
if issubclass(T2, T1): return T2
if issubclass(T1, T2): return T1
# Floats trump everything else.
if issubclass(T2, float): return T2
if issubclass(T1, float): return T1
# Subclasses of the same base class give priority to the second.
if T1.__base__ is T2.__base__: return T2
# Otherwise, just give up.
raise TypeError('cannot coerce types %r and %r' % (T1, T2))
def counts(data):
"""
Generate a table of sorted (value, frequency) pairs.
"""
if data is None:
raise TypeError('None is not iterable')
table = collections.Counter(data).most_common()
if not table:
return table
# Extract the values with the highest frequency.
maxfreq = table[0][1]
for i in range(1, len(table)):
if table[i][1] != maxfreq:
table = table[:i]
break
return table
# === Measures of central tendency (averages) ===
def mean(data):
"""Return the sample arithmetic mean of data.
If ``data`` is empty, StatisticsError will be raised.
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
return sum(data) / n
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2
def median_low(data):
"""Return the low median of numeric data.
When the number of data points is odd, the middle value is returned.
When it is even, the smaller of the two middle values is returned.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
return data[n // 2 - 1]
def median_high(data):
"""Return the high median of data.
When the number of data points is odd, the middle value is returned.
When it is even, the larger of the two middle values is returned.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
return data[n // 2]
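# e.g. for data = [1, 3, 5, 7]:
#   median(data) == 4.0, median_low(data) == 3, median_high(data) == 5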
def mode(data):
"""Return the most common data point from discrete or nominal data.
``mode`` assumes discrete data, and returns a single value. This is the
standard treatment of the mode as commonly taught in schools:
If there is not exactly one most common value, ``mode`` will raise
StatisticsError.
"""
# Generate a table of sorted (value, frequency) pairs.
table = counts(data)
if len(table) == 1:
return table[0][0]
elif table:
raise StatisticsError(
'no unique mode; found %d equally common values' % len(table)
)
else:
raise StatisticsError('no mode for empty data')
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
"""Return sum of square deviations of sequence data.
If ``c`` is None, the mean is calculated in one pass, and the deviations
from the mean are calculated in a second pass. Otherwise, deviations are
calculated from ``c`` as given. Use the second case with care, as it can
lead to garbage results.
"""
if c is None:
c = mean(data)
ss = sum((x - c) ** 2 for x in data)
# The following sum should mathematically equal zero, but due to rounding
# error may not.
ss -= sum((x - c) for x in data) ** 2 / len(data)
assert not ss < 0, 'negative sum of square deviations: %f' % ss
return ss
def variance(data, xbar=None):
"""Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 2:
raise StatisticsError('variance requires at least two data points')
ss = _ss(data, xbar)
return ss / (n - 1)
def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported:
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
ss = _ss(data, mu)
return ss / n
def stdev(data, xbar=None):
"""Return the square root of the sample variance.
See ``variance`` for arguments and other details.
"""
var = variance(data, xbar)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def geometric_mean(data):
"""Return the geometric mean of data
"""
if not data:
raise StatisticsError('geometric_mean requires at least one data point')
# map non-positive values (0 -> e, negatives -> 1.0) so the product stays defined
data = [x if x > 0 else math.e if x == 0 else 1.0 for x in data]
return math.pow(math.fabs(functools.reduce(operator.mul, data)), 1.0 / len(data))
def harmonic_mean(data):
"""Return the harmonic mean of data
"""
if not data:
raise StatisticsError('harmonic_mean requires at least one data point')
return len(data) / sum(map(lambda x: 1.0 / x if x else 0.0, data))
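# e.g. geometric_mean([2, 8]) == 4.0 and harmonic_mean([1, 2]) ~= 1.333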
def skewness(data):
"""Return the skewness of the data's distribution
"""
if not data:
raise StatisticsError('skewness requires at least one data point')
size = len(data)
sd = stdev(data) ** 3
if not sd:
return 0.0
mn = mean(data)
return sum(map(lambda x: ((x - mn) ** 3 / sd), data)) / size
def kurtosis(data):
"""Return the kurtosis of the data's distribution
"""
if not data:
raise StatisticsError('kurtosis requires at least one data point')
size = len(data)
sd = stdev(data) ** 4
if not sd:
return 0.0
mn = mean(data)
return sum(map(lambda x: ((x - mn) ** 4 / sd), data)) / size - 3
def percentile(data, n):
"""Return the n-th percentile of the given data
Assume that the data are already sorted
"""
size = len(data)
idx = (n / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
return data[int(idx)]
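# e.g. percentile([1, 2, 3, 4], 50): idx = 0.5 * 4 - 0.5 = 1.5, so the
# result is data[1] == 2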
def get_histogram(data):
"""Return the histogram relative to the given data
Assume that the data are already sorted
"""
count = len(data)
if count < 2:
raise StatisticsError('Too few data points ({}) for get_histogram'.format(count))
min_ = data[0]
max_ = data[-1]
std = stdev(data)
bins = get_histogram_bins(min_, max_, std, count)
res = {x: 0 for x in bins}
for value in data:
for bin_ in bins:
if value <= bin_:
res[bin_] += 1
break
return sorted(iteritems(res))
def get_histogram_bins(min_, max_, std, count):
"""
Return optimal bins given the input parameters
"""
width = _get_bin_width(std, count)
count = int(round((max_ - min_) / width) + 1)
if count:
bins = [i * width + min_ for i in xrange(1, count + 1)]
else:
bins = [min_]
return bins
def _get_bin_width(stdev, count):
"""Return the histogram's optimal bin width based on Sturges
http://www.jstor.org/pss/2965501
"""
w = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
if w:
return w
else:
return 1
|
AppMetrics
|
/AppMetrics-0.5.0.tar.gz/AppMetrics-0.5.0/appmetrics/statistics.py
|
statistics.py
|
.. _documentation: https://wiki.appnexus.com/display/api/Home
.. _Thingy: https://github.com/numberly/thingy
===============
AppNexus-client
===============
.. image:: https://img.shields.io/pypi/v/appnexus-client.svg
:target: https://pypi.python.org/pypi/appnexus-client
.. image:: https://img.shields.io/github/license/numberly/appnexus-client.svg
:target: https://github.com/numberly/appnexus-client/blob/master/LICENSE
.. image:: https://img.shields.io/travis/numberly/appnexus-client/master.svg
:target: https://travis-ci.org/numberly/appnexus-client
.. image:: https://img.shields.io/coveralls/numberly/appnexus-client.svg
:target: https://coveralls.io/github/numberly/appnexus-client
.. image:: http://readthedocs.org/projects/appnexus-client/badge
:target: http://appnexus-client.readthedocs.io
|
General purpose Python client for the AppNexus API.
This library exists because most of the open-source solutions we found were for
specific AppNexus tasks, such as reporting. Our solution, however, is meant to
be used with any AppNexus service.
As it heavily relies on the AppNexus API, we advise you to read its
documentation_.
This client uses models in much the same way a database ORM does, but you can
also hook in your own data representation class, or simply use Python
dictionaries.
Install
=======
.. code-block:: sh
$ pip install appnexus-client
Getting started
===============
Services
--------
A service is an endpoint on the AppNexus API, representing an entity such as a
creative. Here is the complete list of services usable with AppNexus-client:
``AccountRecovery``, ``AdProfile``, ``AdQualityRule``, ``Adserver``,
``Advertiser``, ``BatchSegment``, ``Brand``, ``Broker``, ``Browser``,
``BudgetSplitter``, ``Campaign``, ``Carrier``, ``Category``, ``ChangeLog``,
``ChangeLogDetail``, ``City``, ``ContentCategory``, ``Country``, ``Creative``,
``CreativeFormat``, ``Currency``, ``CustomModel``, ``CustomModelHash``,
``CustomModelLogit``, ``CustomModelLUT``, ``CustomModelParser``, ``Deal``,
``DealBuyerAccess``, ``DealFromPackage``, ``DMA``, ``DeviceMake``,
``DeviceModel``, ``DomainAuditStatus``, ``DomainList``, ``ExternalInvCode``,
``InsertionOrder``, ``InventoryAttribute``, ``InventoryResold``,
``IPRangeList``, ``Label``, ``Language``, ``LineItem``, ``LineItemModel``,
``Lookup``, ``ManualOfferRanking``, ``MediaSubtype``, ``MediaType``, ``Member``,
``MemberProfile``, ``MobileApp``, ``MobileAppInstance``,
``MobileAppInstanceList``, ``MobileAppStore``, ``NativeCustomKey``,
``ObjectLimit``, ``OperatingSystem``, ``OperatingSystemExtended``,
``OperatingSystemFamily``, ``OptimizationZone``, ``Package``,
``PackageBuyerAccess``, ``PaymentRule``, ``Pixel``, ``Placement``,
``PlatformMember``, ``PostalCode``, ``Profile``, ``ProfileSummary``,
``Publisher``, ``Region``, ``Report``, ``ReportStatus``, ``Search``,
``Segment``, ``Site``, ``TechnicalAttribute``, ``Template``,
``ThirdpartyPixel``, ``User``, ``UsergroupPattern``, ``VisibilityProfile``
Connecting
----------
First of all, you need to connect the client to AppNexus. One simple way is to
use the ``connect`` function with your credentials:
.. code-block:: python
from appnexus import connect
connect("my-username", "my-password")
From there, you can use all the features of the library.
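Credentials can also be loaded from a config file holding an ``appnexus``
section whose keys mirror the arguments of ``connect`` (a hedged sketch; the
file name is illustrative):
.. code-block:: python
from appnexus.client import connect_from_file
connect_from_file("appnexus.cfg")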
Models
------
A model in AppNexus-client is an abstraction for a service. Most of them are
already declared and you just have to import them.
You can access the fields of an AppNexus entity just like any object's:
``entity.field_name``
For example, to print the name of each and every city registered in AppNexus,
you could do:
.. code-block:: python
from appnexus import City
for city in City.find():
print(city.name)
You can also retrieve a single result (the first one returned by the API) using
the ``find_one`` method:
.. code-block:: python
city = City.find_one(id=1337)
Filtering and sorting
---------------------
Sorting with AppNexus-client is easy: just give a ``sort`` parameter with a
value indicating which field is sorted in which order (``asc`` or
``desc``). This parameter will be supplied to the AppNexus API which will
return a sorted response.
You can filter entities using parameters of the methods ``find`` and
``find_one``. Each parameter stands as a new filter for the field it is named
after. For example, you can search for cities whose ``country_code`` field is
equal to "FR" and sort them by name:
.. code-block:: python
for city in City.find(country_code="FR", sort="name.desc"):
print(city.name)
The parameters you give to the ``find`` and ``find_one`` methods are translated
into query parameters for the requests being sent. For example, the snippet
``Creative.find(state="active", advertiser_id=[1, 2, 3])`` will result in a GET
request to ``http://api.appnexus.com/creative?state=active&advertiser_id=1,2,3``
Please search in the AppNexus API documentation_ to understand the meaning of
each parameter.
Custom data representation
--------------------------
By default, AppNexus-client relies on Thingy_ to represent data as objects.
But you can also hook your own data representation class. For this, you must
use a function that exposes this signature:
.. code-block:: python
function(client, service, object)
The ``client`` argument is an ``AppNexusClient`` instance. ``service`` is the
string representation of the service to which the object belongs. ``object`` is
a dictionary containing the data about the AppNexus entity. The return value
of this function will be used as the data representation.
To use this function and get the desired data representation, you must pass it
to the client as the ``representation`` keyword argument.
If you want your data to be in the form of simple dictionaries rather than
Thingy_ instances, AppNexus-client provides a ``raw`` representation that you
can use pretty easily:
.. code-block:: python
from appnexus.representations import raw
connect("username", "password", representation=raw)
But if, for example, you would prefer to get lists of tuples, you would have to
craft your own representation function:
.. code-block:: python
def custom_representation(client, service_name, object):
return object.items()
connect("username", "password", representation=custom_representation)
Reports
-------
Retrieving report data takes three steps:
1. Creating a report
2. Checking if the report is ready to download
3. Downloading the report
.. code-block:: python
from appnexus import Report
json = {
"report_type": "network_analytics",
"columns": [
"clicks",
"total_convs",
"insertion_order_id",
"line_item_id",
],
"report_interval": "lifetime",
"format": "csv"
}
report = Report(json).save()
data = report.download()
The ``download`` method on a ``Report`` object checks whether the report is
ready for download, retrying by default 3 times with an interval of 1 second.
The number of retries can be overridden by passing the parameter
``retry_count`` to the ``download`` method:
.. code-block:: python
data = report.download(retry_count=5)
Changelogs
----------
The ``ChangeLog`` service allows you to retrieve information about changes
that have been made to objects of these services: ``campaign``,
``insertion-order``, ``line-item`` and ``profile``.
For example, you can print the date of every change that was made on a
campaign:
.. code-block:: python
from appnexus import Campaign
campaign = Campaign.find_one()
for change in campaign.changelog:
print(change.created_on)
For more information on a change, you can use the ``ChangeLogDetail`` service
with the returned ``transaction_id`` as a parameter:
.. code-block:: python
from appnexus import ChangeLogDetail
detail = ChangeLogDetail.find_one(service="campaign",
resource_id=change.resource_id,
transaction_id=change.transaction_id)
print(detail.user_full_name)
Tests
=====
To run AppNexus-client tests:
* install developer requirements with ``pip install -r requirements.txt``;
* run ``pytest``.
License
=======
MIT
|
AppNexus-client
|
/AppNexus-client-0.8.1.tar.gz/AppNexus-client-0.8.1/README.rst
|
README.rst
|
API reference
#############
Client
======
.. automodule:: appnexus.client
:members:
:undoc-members:
Cursor
======
.. automodule:: appnexus.cursor
:members:
:undoc-members:
Exceptions
==========
.. automodule:: appnexus.exceptions
:members:
:undoc-members:
Model
=====
.. automodule:: appnexus.model
:members: Model
:undoc-members:
Services
--------
.. automodule:: appnexus.model
:members:
:undoc-members:
:exclude-members: Model
Representations
===============
.. automodule:: appnexus.representations
:members:
:undoc-members:
Utils
=====
.. automodule:: appnexus.utils
:members:
:undoc-members:
|
AppNexus-client
|
/AppNexus-client-0.8.1.tar.gz/AppNexus-client-0.8.1/docs/source/reference.rst
|
reference.rst
|
import functools
import logging
import os
import time
import requests
from appnexus.cursor import Cursor
from appnexus.exceptions import (AppNexusException, BadCredentials, NoAuth,
RateExceeded)
from appnexus.utils import normalize_service_name
try:
from configparser import ConfigParser
except ImportError: # pragma: nocover
from ConfigParser import ConfigParser
logger = logging.getLogger("appnexus-client")
class AppNexusClient(object):
"""Represents an active connection to the AppNexus API"""
url = "https://api.appnexus.com/"
test_url = "https://api-test.appnexus.com/"
error_codes = {"RATE_EXCEEDED": RateExceeded}
error_ids = {"NOAUTH": NoAuth}
def __init__(self, username=None, password=None, test=False,
representation=None, token_file=None):
self.credentials = {"username": username, "password": password}
self.token = None
self.token_file = None
self.load_token(token_file)
self.representation = representation
self.test = bool(test)
self._generate_services()
def _prepare_uri(self, service_name, **parameters):
"""Prepare the URI for a request
:param service_name: The target service
:type service_name: str
:param parameters: query parameters
:return: The uri of the request
"""
query_parameters = []
for key, value in parameters.items():
if isinstance(value, (list, tuple)):
value = ",".join([str(member) for member in value])
if isinstance(value, bool):
value = "true" if value else "false"
query_parameters.append("{}={}".format(key, value))
if query_parameters:
uri = "{}{}?{}".format(self.base_url, service_name,
"&".join(query_parameters))
else:
uri = "{}{}".format(self.base_url, service_name)
return uri
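# e.g. (one possible ordering; lists are comma-joined and booleans become
# "true"/"false"):
#   _prepare_uri("creative", state="active", advertiser_id=[1, 2, 3])
#   -> "https://api.appnexus.com/creative?state=active&advertiser_id=1,2,3"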
# shiro: Coverage is disabled for this function because it's mocked and it
# doesn't need testing (for the moment) since it's a simple instruction
def _handle_rate_exceeded(self, response): # pragma: no cover
"""Handles rate exceeded errors"""
waiting_time = int(response.headers.get("Retry-After", 10))
time.sleep(waiting_time)
def _send(self, send_method, service_name, data=None, **kwargs):
"""Send a request to the AppNexus API (used for internal routing)
:param send_method: The method sending the request (usually requests.*)
:type send_method: function
:param service_name: The target service
:param data: The payload of the request (optional)
:type data: anything JSON-serializable
"""
valid_response = False
raw = kwargs.pop("raw", False)
while not valid_response:
headers = dict(Authorization=self.token)
uri = self._prepare_uri(service_name, **kwargs)
logger.debug(' '.join(map(str, (headers, uri, data))))
response = send_method(uri, headers=headers, json=data)
content_type = response.headers["Content-Type"].split(";")[0]
if response.content and content_type == "application/json":
response_data = response.json()
if "response" in response_data:
response_data = response_data["response"]
elif response.content:
return response.content
else:
return None
try:
self.check_errors(response, response_data)
except RateExceeded:
self._handle_rate_exceeded(response)
except NoAuth:
self.update_token()
else:
valid_response = True
if raw:
return response.json()
return response_data
def update_token(self):
"""Request a new token and store it for future use"""
logger.info('updating token')
if None in self.credentials.values():
raise RuntimeError("You must provide a username and a password")
credentials = dict(auth=self.credentials)
url = self.test_url if self.test else self.url
response = requests.post(url + "auth",
json=credentials)
data = response.json()["response"]
if "error_id" in data and data["error_id"] == "NOAUTH":
raise BadCredentials()
if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
time.sleep(150)
return
if "error_code" in data or "error_id" in data:
raise AppNexusException(response)
self.token = data["token"]
self.save_token()
return self.token
def check_errors(self, response, data):
"""Check for errors and raise an appropriate error if needed"""
if "error_id" in data:
error_id = data["error_id"]
if error_id in self.error_ids:
raise self.error_ids[error_id](response)
if "error_code" in data:
error_code = data["error_code"]
if error_code in self.error_codes:
raise self.error_codes[error_code](response)
if "error_code" in data or "error_id" in data:
raise AppNexusException(response)
def get(self, service_name, **kwargs):
"""Retrieve data from AppNexus API"""
return self._send(requests.get, service_name, **kwargs)
def modify(self, service_name, json, **kwargs):
"""Modify an AppNexus object"""
return self._send(requests.put, service_name, json, **kwargs)
def create(self, service_name, json, **kwargs):
"""Create a new AppNexus object"""
return self._send(requests.post, service_name, json, **kwargs)
def delete(self, service_name, *ids, **kwargs):
"""Delete an AppNexus object"""
return self._send(requests.delete, service_name, id=ids, **kwargs)
def append(self, service_name, json, **kwargs):
kwargs.update({"append": True})
return self.modify(service_name, json, **kwargs)
def meta(self, service_name):
"""Retrieve meta-informations about a service"""
return self.get(service_name + "/meta")
def find(self, service_name, arguments=None, representation=None,
**kwargs):
representation = representation or self.representation
args = arguments.copy() if arguments else dict()
args.update(kwargs)
return Cursor(self, service_name, representation, **args)
def connect(self, username, password, test=None, representation=None,
token_file=None):
self.credentials = {"username": username, "password": password}
if test is not None:
self.test = bool(test)
if representation is not None:
self.representation = representation
if token_file is not None:
self.load_token(token_file)
def connect_from_file(self, filename):
config = ConfigParser()
config.read(filename)
connect_data = dict(config["appnexus"])
self.connect(**connect_data)
def _generate_services(self):
for service_name in services_list:
normalized_name = normalize_service_name(service_name)
snake_name = normalized_name.replace('-', '_')
generated_service = Service(self, normalized_name)
setattr(self, snake_name, generated_service)
def save_token(self):
if not self.token_file or not self.token:
return
with open(self.token_file, mode='w') as fp:
fp.write(self.token)
def load_token(self, token_file=None):
if not self.token_file:
if not token_file:
return
self.token_file = token_file
if not os.path.exists(self.token_file):
return
with open(self.token_file) as fp:
self.token = fp.read().strip()
@property
def base_url(self):
if self.test:
return self.test_url
else:
return self.url
services_list = ["AccountRecovery", "AdProfile", "AdQualityRule", "Adserver",
"Advertiser", "BatchSegment", "Brand", "Broker", "Browser",
"BudgetSplitter", "Campaign", "Carrier", "Category",
"ChangeLog", "ChangeLogDetail", "City", "ContentCategory",
"Country", "Creative", "CreativeFormat", "Currency",
"CustomModel", "CustomModelHash", "CustomModelLogit",
"CustomModelLUT", "CustomModelParser", "Deal",
"DealBuyerAccess", "DealFromPackage", "DMA", "DeviceMake",
"DeviceModel", "DomainAuditStatus", "DomainList",
"ExternalInvCode", "InsertionOrder", "InventoryAttribute",
"InventoryResold", "IPRangeList", "Label", "Language",
"LineItem", "LineItemModel", "Lookup", "ManualOfferRanking",
"MediaSubtype", "MediaType", "Member", "MemberProfile",
"MobileApp", "MobileAppInstance", "MobileAppInstanceList",
"MobileAppStore", "NativeCustomKey", "ObjectLimit",
"OperatingSystem", "OperatingSystemExtended",
"OperatingSystemFamily", "OptimizationZone", "Package",
"PackageBuyerAccess", "PaymentRule", "Pixel", "Placement",
"PlatformMember", "PostalCode", "Profile", "ProfileSummary",
"Publisher", "Region", "Report", "ReportStatus", "Search",
"Segment", "Site", "TechnicalAttribute", "Template",
"ThirdpartyPixel", "User", "UsergroupPattern",
"VisibilityProfile"]
class Service(object):
def __init__(self, client, name):
self.client = client
self.name = name
def find(self, arguments=None, **kwargs):
return self.client.find(self.name, arguments, **kwargs)
def find_one(self, arguments=None, **kwargs):
return self.find(arguments, **kwargs).first
def get(self, **kwargs):
return self.client.get(self.name, **kwargs)
def modify(self, json, **kwargs):
return self.client.modify(self.name, json, **kwargs)
def create(self, json, **kwargs):
return self.client.create(self.name, json, **kwargs)
def delete(self, *args):
return self.client.delete(self.name, *args)
client = AppNexusClient()
@functools.wraps(client.connect)
def connect(*args, **kwargs):
return client.connect(*args, **kwargs)
@functools.wraps(client.connect_from_file)
def connect_from_file(*args, **kwargs):
return client.connect_from_file(*args, **kwargs)
@functools.wraps(client.find)
def find(*args, **kwargs):
return client.find(*args, **kwargs)
__all__ = ["AppNexusClient", "client", "connect", "find"]
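# Hedged usage sketch: every entry of services_list is exposed as a
# snake_case Service attribute on the module-level client (attribute names
# assume normalize_service_name lowercases the service name):
#   from appnexus.representations import raw
#   connect("my-username", "my-password", representation=raw)
#   for city in client.city.find(country_code="FR"):
#       print(city["name"])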
|
AppNexus-client
|
/AppNexus-client-0.8.1.tar.gz/AppNexus-client-0.8.1/appnexus/client.py
|
client.py
|
import logging
import time
from thingy import Thingy
from appnexus.client import AppNexusClient, client, services_list
from appnexus.utils import classproperty, normalize_service_name
logger = logging.getLogger("appnexus-client")
class Model(Thingy):
"""Generic model for AppNexus data"""
_update_on_save = True
client = client
@classmethod
def connect(cls, username, password):
cls.client = AppNexusClient(username, password)
return cls.client
@classmethod
def find(cls, **kwargs):
representation = (kwargs.pop("representation", None)
or cls.client.representation
or cls.constructor)
return cls.client.find(cls.service_name, representation=representation,
**kwargs)
@classmethod
def find_one(cls, **kwargs):
return cls.find(**kwargs).first
@classmethod
def count(cls, **kwargs):
return cls.find(**kwargs).count()
@classmethod
def meta(cls):
return cls.client.meta(cls.service_name)
@classproperty
def service_name(cls):
return normalize_service_name(cls.__name__)
@classmethod
def create(cls, payload, **kwargs):
payload = {cls.service_name: payload}
return cls.client.create(cls.service_name, payload, **kwargs)
@classmethod
def delete(cls, *args, **kwargs):
return cls.client.delete(cls.service_name, *args, **kwargs)
@classmethod
def modify(cls, payload, **kwargs):
payload = {cls.service_name: payload}
return cls.client.modify(cls.service_name, payload, **kwargs)
@classmethod
def constructor(cls, client, service_name, obj):
cls.client = client
cls.service_name = service_name
return cls(obj)
def save(self, **kwargs):
payload = self.__dict__
if "id" not in self.__dict__:
logger.info("creating a {}".format(self.service_name))
result = self.create(payload, **kwargs)
else:
result = self.modify(payload, id=self.id, **kwargs)
if self._update_on_save:
self.update(result)
return self
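# Hedged usage sketch: save() creates when the instance has no "id" yet,
# otherwise it modifies (the Campaign payload is illustrative):
#   campaign = Campaign(name="test")
#   campaign.save()            # -> Campaign.create(...)
#   campaign.name = "renamed"
#   campaign.save()            # -> Campaign.modify(..., id=campaign.id)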
class AlphaModel(Model):
_update_on_save = False
_modifiable_fields = ()
def __setattr__(self, attr, value):
if self._modifiable_fields and attr not in self._modifiable_fields:
raise AttributeError("'{}' can't be modified".format(attr))
super(AlphaModel, self).__setattr__(attr, value)
@classmethod
def find(cls, **kwargs):
raise NotImplementedError("Can't get multiple objects on '{}' service"
.format(cls.service_name))
@classmethod
def find_one(cls, id, **kwargs):
representation = (kwargs.pop("representation", None)
or cls.client.representation
or cls.constructor)
response = cls.client.get(cls.service_name, id=id, **kwargs)
if representation:
return representation(cls.client, cls.service_name, response)
return response
@classmethod
def modify(cls, payload, **kwargs):
non_modifiable_fields = set(payload) - set(cls._modifiable_fields)
for field in non_modifiable_fields:
del payload[field]
return super(AlphaModel, cls).modify(payload, **kwargs)
class CustomModelHash(AlphaModel):
_modifiable_fields = ("coefficients",)
class CustomModelLogit(AlphaModel):
_modifiable_fields = ("beta0", "active", "predictors", "scale", "min",
"max", "name", "offset", "member_id")
class CustomModelLUT(AlphaModel):
_modifiable_fields = ("coefficients",)
class LineItemModel(AlphaModel):
pass
class Report(Model):
def download(self, retry_count=3, **kwargs):
while not self.is_ready and retry_count > 0:
retry_count -= 1
time.sleep(1)
return self.client.get("report-download", id=self.report_id)
@property
def is_ready(self):
status = self.client.get("report",
id=self.report_id).get("execution_status")
return (status == "ready")
class BudgetSplitterMixin():
@property
def budget_splitter(self):
return BudgetSplitter.find_one(id=self.id) # noqa: F821
class ChangeLogMixin():
@property
def changelog(self):
return ChangeLog.find(service=self.service_name, # noqa: F821
resource_id=self.id)
class ProfileMixin():
@property
def profile(self):
return Profile.find_one(id=self.profile_id) # noqa: F821
def create_models(services_list):
for service_name in services_list:
ancestors = [Model]
if service_name in ("LineItem"):
ancestors.append(BudgetSplitterMixin)
if service_name in ("Campaign", "InsertionOrder", "LineItem",
"Profile"):
ancestors.append(ChangeLogMixin)
if service_name in ("AdQualityRule", "Advertiser", "Campaign",
"Creative", "LineItem", "PaymentRule"):
ancestors.append(ProfileMixin)
model = type(service_name, tuple(ancestors), {})
globals().setdefault(service_name, model)
create_models(services_list)
__all__ = ["Model", "services_list"] + services_list
|
AppNexus-client
|
/AppNexus-client-0.8.1.tar.gz/AppNexus-client-0.8.1/appnexus/model.py
|
model.py
|
class Cursor(object):
"""Represents a cursor on collection of AppNexus objects"""
batch_size = 100
common_keys = {"status", "count", "dbg_info", "num_elements",
"start_element"}
def __init__(self, client, service_name, representation, **specs):
"""Initialize the object
:param client: an AppNexusClient instance
:param service_name: the service to which the request was made
:param specs: The specifications sent to AppNexus with the request
"""
# Health checks
if client is None or service_name is None:
raise RuntimeError("client and service can't be set to None")
if representation is None or not callable(representation):
raise TypeError("representation must be non-null and callable")
self.client = client
self.service_name = service_name
self.representation = representation
self.specs = specs
self.retrieved = 0
self._skip = 0
self._limit = float('inf')
def __len__(self):
"""Returns the number of elements matching the specifications"""
return self.count()
def __getitem__(self, idx):
"""Returns the nth element matching the specifications"""
page = self.get_page(num_elements=1, start_element=idx)
data = self.extract_data(page)
return data[0]
def __iter__(self):
"""Iterate over all AppNexus objects matching the specifications"""
for page in self.iter_pages():
data = self.extract_data(page)
if self._skip >= len(data):
self._skip -= len(data)
continue
elif self._skip:
data = data[self._skip:]
self._skip = 0
lasting = self._limit - self.retrieved
if not lasting:
break
elif lasting < len(data):
data = data[:lasting]
for entity in data:
self.retrieved += 1
yield entity
def extract_data(self, page):
"""Extract the AppNexus object or list of objects from the response"""
response_keys = set(page.keys())
uncommon_keys = response_keys - self.common_keys
for possible_data_key in uncommon_keys:
element = page[possible_data_key]
if isinstance(element, dict):
return [self.representation(self.client, self.service_name,
element)]
if isinstance(element, list):
return [self.representation(self.client, self.service_name, x)
for x in element]
@property
def first(self):
"""Extract the first AppNexus object present in the response"""
page = self.get_page(num_elements=1)
data = self.extract_data(page)
if data:
return data[0]
def get_page(self, start_element=0, num_elements=None):
"""Get a page (100 elements) starting from `start_element`"""
if num_elements is None:
num_elements = self.batch_size
specs = self.specs.copy()
specs.update(start_element=start_element, num_elements=num_elements)
return self.client.get(self.service_name, **specs)
def iter_pages(self, skip_elements=0):
"""Iterate as much as needed to get all available pages"""
start_element = skip_elements
count = -1
while start_element < count or count == -1:
page = self.get_page(start_element)
yield page
start_element = page["start_element"] + page["num_elements"]
count = page["count"]
def count(self):
"""Returns the number of elements matching the specifications"""
return self.get_page(num_elements=1)["count"]
def clone(self):
return Cursor(self.client, self.service_name, self.representation,
**self.specs)
def limit(self, number):
"""Limit the cursor to retrieve at most `number` elements"""
self._limit = number
return self
def skip(self, number):
"""Skip the first `number` elements of the cursor"""
self._skip = number
return self
def size(self):
"""Return the number of elements of the cursor with skip and limit"""
initial_count = self.count()
count_with_skip = max(0, initial_count - self._skip)
size = min(count_with_skip, self._limit)
return size
__all__ = ["Cursor"]
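# Hedged usage sketch: cursors are lazy, and skip()/limit() only bound the
# subsequent iteration (the service and filter are illustrative):
#   cursor = client.find("city", representation=raw, country_code="FR")
#   cursor.skip(10).limit(5)
#   cities = list(cursor)   # at most 5 entities, after skipping the first 10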
|
AppNexus-client
|
/AppNexus-client-0.8.1.tar.gz/AppNexus-client-0.8.1/appnexus/cursor.py
|
cursor.py
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Christian Heider Nielsen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
AppPathy
|
/AppPathy-0.0.1-py36-none-any.whl/AppPathy-0.0.1.dist-info/LICENSE.md
|
LICENSE.md
|
import sys
__author__ = "cnheider"
__doc__ = ""
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith("java"):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
SYSTEM = "win32"
elif os_name.startswith("Mac"): # "Mac OS X", etc.
SYSTEM = "darwin"
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
SYSTEM = "linux2"
else:
SYSTEM = sys.platform
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA":"Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {"CSIDL_APPDATA":26, "CSIDL_COMMON_APPDATA":35, "CSIDL_LOCAL_APPDATA":28}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros("c", buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros("c", buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
get_win_folder = None
if SYSTEM == "win32":
try:
import win32com.shell
get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
get_win_folder = _get_win_folder_with_jna
except ImportError:
get_win_folder = _get_win_folder_from_registry
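# Minimal usage sketch (not part of the original module; values illustrative):
# once one of the backends above has been selected, callers resolve folders
# by CSIDL name, e.g.:
# if SYSTEM == "win32" and get_win_folder is not None:
#     local_appdata = get_win_folder("CSIDL_LOCAL_APPDATA")
#     # -> typically C:\Users\<username>\AppData\Local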
|
AppPathy
|
/AppPathy-0.0.1-py36-none-any.whl/apppath/app_path_utilities.py
|
app_path_utilities.py
|
import datetime
import os
from warnings import warn
import pkg_resources
from pip._internal.utils.misc import dist_is_editable
from .app_path import AppPath
__author__ = "cnheider"
__version__ = "0.0.1"
__doc__ = r"""
Created on 27/04/2019
A class and a set of functions for providing for system-consensual path for apps to store data, logs, cache...
@author: cnheider
"""
'''
def dist_is_editable(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is an editable install.
"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
'''
PROJECT_NAME = 'AppPath'
PROJECT_AUTHOR = __author__
PROJECT_APP_PATH = AppPath(app_name=PROJECT_NAME, app_author=PROJECT_AUTHOR)
distributions = {v.key:v for v in pkg_resources.working_set}
if PROJECT_NAME in distributions:
distribution = distributions[PROJECT_NAME]
DEVELOP = dist_is_editable(distribution)
else:
DEVELOP = True
def get_version(append_time=DEVELOP):
version = __version__
if not version:
version = os.getenv("VERSION", "0.0.0")
if append_time:
now = datetime.datetime.utcnow()
date_version = now.strftime("%Y%m%d%H%M%S")
# date_version = time.time()
if version:
# Most git tags are prefixed with 'v' (example: v1.2.3) this is
# never desirable for artifact repositories, so we strip the
# leading 'v' if it's present.
version = (
version[1:]
if isinstance(version, str) and version.startswith("v")
else version
)
else:
# Default version is an ISO8601 compliant datetime. PyPI doesn't allow
# the colon ':' character in its versions, and time is required to allow
# for multiple publications to master in one day. This datetime string
# uses the 'basic' ISO8601 format for both its date and time components
# to avoid issues with the colon character (ISO requires that date and
# time components of a date-time string must be uniformly basic or
# extended, which is why the date component does not have dashes.)
#
# Publications using datetime versions should only be made from master
# to represent the HEAD moving forward.
warn(
f"Environment variable VERSION is not set, only using datetime: {date_version}"
)
# warn(f'Environment variable VERSION is not set, only using timestamp: {version}')
version = f"{version}.{date_version}"
return version
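# Illustrative example (timestamp is hypothetical): with __version__ = "0.0.1"
# and DEVELOP = True, get_version() returns something like
# "0.0.1.20190427120000", i.e. the base version plus a UTC datetime suffix.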
if __version__ is None:
__version__ = get_version(append_time=True)
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
|
AppPathy
|
/AppPathy-0.0.1-py36-none-any.whl/apppath/__init__.py
|
__init__.py
|
import pathlib
__author__ = "cnheider"
__doc__ = "Application data directories extension for pathlib"
import os
from apppath.app_path_utilities import get_win_folder, SYSTEM
class AppPath(object):
r"""
AppPath class for easing cross platform access to proper app data directories
"""
def __init__(
self,
app_name=None,
app_author=None,
app_version=None,
roaming=False,
multi_path=False,
ensure_existence=True,
):
self._app_name = app_name
self._app_author = app_author
self._app_version = app_version
self._roaming = roaming
self._multi_path = multi_path
self._ensure_existence = ensure_existence
@staticmethod
def ensure_existence(enabled, out):
if enabled:
if not out.exists():
out.mkdir(parents=True)
@property
def user_data(self) -> pathlib.Path:
out = self.user_data_path(
self._app_name, self._app_author, version=self._app_version, roaming=self._roaming
)
self.ensure_existence(self._ensure_existence, out)
return out
@property
def site_data(self) -> pathlib.Path:
out = self.site_data_path(
self._app_name, self._app_author, version=self._app_version, multi_path=self._multi_path
)
self.ensure_existence(self._ensure_existence, out)
return out
@property
def user_config(self) -> pathlib.Path:
out = self.user_config_path(
self._app_name, self._app_author, version=self._app_version, roaming=self._roaming
)
self.ensure_existence(self._ensure_existence, out)
return out
@property
def site_config(self) -> pathlib.Path:
out = self.site_config_path(
self._app_name, self._app_author, version=self._app_version, multi_path=self._multi_path
)
self.ensure_existence(self._ensure_existence, out)
return out
@property
def user_cache(self) -> pathlib.Path:
out = self.user_cache_path(self._app_name, self._app_author, version=self._app_version)
self.ensure_existence(self._ensure_existence, out)
return out
@property
def user_state(self) -> pathlib.Path:
out = self.user_state_path(self._app_name, self._app_author, version=self._app_version)
self.ensure_existence(self._ensure_existence, out)
return out
@property
def user_log(self) -> pathlib.Path:
out = self.user_log_path(self._app_name, self._app_author, version=self._app_version)
self.ensure_existence(self._ensure_existence, out)
return out
@staticmethod
def user_data_path(app_name=None, app_author=None, version=None, roaming=False) -> pathlib.Path:
r"""Return full path to the user-specific data dir for this application.
"app_name" is the name of application.
If None, just the system directory is returned.
"app_author" (only used on Windows) is the name of the
app_author or distributing body for this application. Typically
it is the owning company name. This falls back to app_name. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when app_name is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Notes:
- MSDN on where to store app data files:
http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
- Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
- XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application
Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if SYSTEM == "win32":
if app_author is None:
app_author = app_name
const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
path = pathlib.Path(os.path.normpath(get_win_folder(const)))
if app_name:
if app_author is not False:
path = path / app_author / app_name
else:
path = path / app_name
elif SYSTEM == "darwin":
path = pathlib.Path(os.path.expanduser("~/Library/Application Support/"))
if app_name:
path = path / app_name
else:
path = pathlib.Path(os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share")))
if app_name:
path = path / app_name
if app_name and version:
path = path / version
return path
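# Illustrative examples (paths indicative, not exhaustive):
#   AppPath.user_data_path("MyApp", version="1.0")
#     -> ~/.local/share/MyApp/1.0 on Linux (or under $XDG_DATA_HOME)
#     -> ~/Library/Application Support/MyApp/1.0 on macOS
#     -> under CSIDL_LOCAL_APPDATA (or CSIDL_APPDATA if roaming=True) on Windows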
@staticmethod
def site_data_path(app_name=None, app_author=None, version=None, multi_path=False) -> pathlib.Path:
r"""Return full path to the user-shared data dir for this application.
"app_name" is the name of application.
If None, just the system directory is returned.
"app_author" (only used on Windows) is the name of the
app_author or distributing body for this application. Typically
it is the owning company name. This falls back to app_name. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when app_name is present.
"multi_path" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if SYSTEM == "win32":
if app_author is None:
app_author = app_name
path = pathlib.Path(os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA")))
if app_name:
if app_author is not False:
path = path / app_author / app_name
else:
path = path / app_name
elif SYSTEM == "darwin":
path = pathlib.Path(os.path.expanduser("/Library/Application Support"))
if app_name:
path = path / app_name
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv("XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"]))
path_list = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if app_name:
if version:
app_name = os.path.join(app_name, version)
path_list = [os.sep.join([x, app_name]) for x in path_list]
path_list = [pathlib.Path(a) for a in path_list]
if multi_path:
path = os.pathsep.join(str(a) for a in path_list)
else:
path = path_list[0]
return path
if app_name and version:
path = path / version
return path
@staticmethod
def user_config_path(app_name=None, app_author=None, version=None, roaming=False) -> pathlib.Path:
r"""Return full path to the user-specific config dir for this application.
"app_name" is the name of application.
If None, just the system directory is returned.
"app_author" (only used on Windows) is the name of the
app_author or distributing body for this application. Typically
it is the owning company name. This falls back to app_name. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when app_name is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: ~/Library/Preferences/<AppName>
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if SYSTEM == "win32":
path = AppPath.user_data_path(app_name, app_author, None, roaming)
elif SYSTEM == "darwin":
path = pathlib.Path(os.path.expanduser("~/Library/Preferences/"))
if app_name:
path = path / app_name
else:
path = pathlib.Path(os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")))
if app_name:
path = path / app_name
if app_name and version:
path = path / version
return path
@staticmethod
def site_config_path(app_name=None, app_author=None, version=None, multi_path=False) -> pathlib.Path:
r"""Return full path to the user-shared data dir for this application.
"app_name" is the name of application.
If None, just the system directory is returned.
"app_author" (only used on Windows) is the name of the
app_author or distributing body for this application. Typically
it is the owning company name. This falls back to app_name. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when app_name is present.
"multi_path" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if SYSTEM == "win32":
path = AppPath.site_data_path(app_name, app_author)
if app_name and version:
path = path / version
elif SYSTEM == "darwin":
path = pathlib.Path(os.path.expanduser("/Library/Preferences"))
if app_name:
path = path / app_name
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multi_path is False
path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
path_list = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if app_name:
if version:
app_name = os.path.join(app_name, version)
path_list = [os.sep.join([x, app_name]) for x in path_list]
path_list = [pathlib.Path(a) for a in path_list]
if multi_path:
path = os.pathsep.join(str(a) for a in path_list)
else:
path = path_list[0]
return path
@staticmethod
def user_cache_path(app_name=None, app_author=None, version=None, opinion=True) -> pathlib.Path:
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application
Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if SYSTEM == "win32":
if app_author is None:
app_author = app_name
path = pathlib.Path(os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA")))
if app_name:
if app_author is not False:
path = path / app_author / app_name
else:
path = path / app_name
if opinion:
path = path / "Cache"
elif SYSTEM == "darwin":
path = pathlib.Path(os.path.expanduser("~/Library/Caches"))
if app_name:
path = path / app_name
else:
path = pathlib.Path(os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache")))
if app_name:
path = path / app_name
if app_name and version:
path = path / version
return path
@staticmethod
def user_state_path(app_name=None, app_author=None, version=None, roaming=False) -> pathlib.Path:
r"""Return full path to the user-specific state dir for this application.
"app_name" is the name of application.
If None, just the system directory is returned.
"app_author" (only used on Windows) is the name of the
app_author or distributing body for this application. Typically
it is the owning company name. This falls back to app_name. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when app_name is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if SYSTEM in ["win32", "darwin"]:
path = AppPath.user_data_path(app_name, app_author, None, roaming)
else:
path = pathlib.Path(os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state")))
if app_name:
path = path / app_name
if app_name and version:
path = path / version
return path
@staticmethod
def user_log_path(app_name=None, app_author=None, version=None, opinion=True) -> pathlib.Path:
r"""Return full path to the user-specific log dir for this application.
"app_name" is the name of application.
If None, just the system directory is returned.
"app_author" (only used on Windows) is the name of the
app_author or distributing body for this application. Typically
it is the owning company name. This falls back to app_name. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when app_name is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application
Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if SYSTEM == "darwin":
path = pathlib.Path(os.path.expanduser("~/Library/Logs"))
if app_name:
    path = path / app_name
elif SYSTEM == "win32":
path = AppPath.user_data_path(app_name, app_author, version)
version = False
if opinion:
path = path / "Logs"
else:
path = AppPath.user_cache_path(app_name, app_author, version)
version = False
if opinion:
path = path / "log"
if app_name and version:
path = path / version
return path
if __name__ == "__main__":
_app_name = "MyApp"
_app_author = __author__
props = ("user_data", "user_config", "user_cache", "user_state", "user_log", "site_data", "site_config")
print("-- app dirs (with optional 'version')")
dirs = AppPath(_app_name, _app_author, app_version="1.0", ensure_existence=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppPath(_app_name, _app_author, ensure_existence=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional '_app_author')")
dirs = AppPath(_app_name, ensure_existence=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled '_app_author')")
dirs = AppPath(_app_name, app_author=False, ensure_existence=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
|
AppPathy
|
/AppPathy-0.0.1-py36-none-any.whl/apppath/app_path.py
|
app_path.py
|
### README.md 👋
# AppSafe
[](https://github.com/Justaus3r/Penta)
[](https://opensource.org/licenses/)

[](https://github.com/Justaus3r)
[](https://www.python.org/)

A simple App Locker written in Python.
# Installation:
- Just git clone it using ```git clone https://github.com/Xeroxxhah/AppSafe.git```
- Install the requirements using ```python3 -m pip install -r requirements.txt```
- Then simply run ```python3 appsafe.py```
### Bug report
Found a bug?
Report it at [email protected]
or open an [issue](https://github.com/Xeroxxhah/AppSafe/issues)
### Updates:
Soon ..
### Contributions:
All contributions are welcome. Fork this repo, improve it, and open a [pull request](https://github.com/Xeroxxhah/AppSafe/pulls).
### License
Distributed under GPL-3.0.
|
AppSafe
|
/AppSafe-1.2.16.tar.gz/AppSafe-1.2.16/README.md
|
README.md
|
import exceptions
import urllib
import urllib2
import cookielib
import socket
import logging
import md5
import sys
import os
import marshal
### Extracted and hacked up from appcfg.py of Google App Engine SDK
def GetUserAgent():
python_version = ".".join(str(i) for i in sys.version_info)
return 'AppState-Python/%s' % python_version
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
"""
self.host = host
self.authenticated = False
self._last_credentials = None  # set by _Authenticate(); used to retry on 401
self.extra_headers = {
"User-agent": GetUserAgent()
}
self.cookie_jar = cookielib.MozillaCookieJar()
self.opener = self._GetOpener()
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "Python-test",
"accountType": "HOSTED_OR_GOOGLE"
})
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self, email, password):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
If login fails with a CaptchaRequired error, visit
https://www.google.com/accounts/DisplayUnlockCaptcha
and verify you are a human. Then try again.
"""
auth_token = self._GetAuthToken(email, password)
self._GetAuthCookie(auth_token)
# Remember the credentials so Send() can re-authenticate on a 401 response.
self._last_credentials = (email, password)
return
def Send(self, request_path, args={}, payload="",
content_type="application/octet-stream",
timeout=None):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
args: CGI keyword arguments, as a dict
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
Returns:
The response body, as a string.
"""
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
url = "http://%s%s?%s" % (self.host, request_path,
urllib.urlencode(args))
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
#req.add_header("X-appcfg-api-version", "1")
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
    if self._last_credentials is None:
        raise
    # Re-authenticate with the stored credentials and retry the request.
    self._Authenticate(*self._last_credentials)
elif e.code >= 500 and e.code < 600:
continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
### End extraction
class DataCorruptionError(Exception): pass
class PRPC():
def __init__(self, hostname, command='/prpc'):
self.hostname = hostname
self.command = command
self.server = HttpRpcServer(hostname)
def login(self, email, password):
self.server._Authenticate(email, password)
def send(self, cmd, arg1=None, arg2=None, arg3=None, arg4=None):
args = {'cmd':cmd}
if arg1 is not None: args['arg1'] = arg1
if arg2 is not None: args['arg2'] = arg2
if arg3 is not None: args['arg3'] = arg3
if arg4 is not None: args['arg4'] = arg4
return self.server.Send(self.command,
content_type='application/x-www-form-urlencoded; charset=utf-8',
payload = urllib.urlencode(args))
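# Illustrative usage (hostname, credentials and command are hypothetical):
# rpc = PRPC('example-app.appspot.com')
# rpc.login('user@example.com', 'password')
# reply = rpc.send('get', arg1='count')  # returns the raw response body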
|
AppState
|
/AppState-0.1.tar.gz/AppState-0.1/prpc.py
|
prpc.py
|
__version__ = '1.0.2'
__all__ = ['dumps', 'loads', 'version']
# Original bencode module by Petru Paler, et al.
#
# Modifications by Connelly Barnes:
#
# - Added support for floats (sent as 32-bit or 64-bit in network
# order), bools, None.
# - Allowed dict keys to be of any serializable type.
# - Lists/tuples are always decoded as tuples (thus, tuples can be
# used as dict keys).
# - Embedded extra information in the 'typecodes' to save some space.
# - Added a restriction on integer length, so that malicious hosts
# cannot pass us large integers which take a long time to decode.
#
# Licensed by Bram Cohen under the "MIT license":
#
# "Copyright (C) 2001-2002 Bram Cohen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software."
#
# (The rencode module is licensed under the above license as well).
#
import struct
import string
from threading import Lock
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = chr(59)
CHR_DICT = chr(60)
CHR_INT = chr(61)
CHR_INT1 = chr(62)
CHR_INT2 = chr(63)
CHR_INT4 = chr(64)
CHR_INT8 = chr(65)
CHR_FLOAT32 = chr(66)
CHR_FLOAT64 = chr(44)
CHR_TRUE = chr(67)
CHR_FALSE = chr(68)
CHR_NONE = chr(69)
CHR_TERM = chr(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f+1])[0], f+1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f+2])[0], f+2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f+4])[0], f+4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f+8])[0], f+8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f+4])[0]
return (n, f+4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f+8])[0]
return (n, f+8)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != CHR_TERM:
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
while x[f] != CHR_TERM:
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f+1)
def decode_false(x, f):
return (False, f+1)
def decode_none(x, f):
return (None, f+1)
decode_func = {}
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
decode_func[CHR_LIST ] = decode_list
decode_func[CHR_DICT ] = decode_dict
decode_func[CHR_INT ] = decode_int
decode_func[CHR_INT1 ] = decode_intb
decode_func[CHR_INT2 ] = decode_inth
decode_func[CHR_INT4 ] = decode_intl
decode_func[CHR_INT8 ] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE ] = decode_true
decode_func[CHR_FALSE ] = decode_false
decode_func[CHR_NONE ] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
return (x[f+1:f+1+slen], f+1+slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[chr(STR_FIXED_START+i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f+1
for i in range(slen):
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f+1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f+1
for j in range(slen):
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i)
make_fixed_length_dict_decoders()
def loads(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType, FloatType, NoneType
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(chr(INT_POS_FIXED_START+x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(chr(INT_NEG_FIXED_START-1-x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
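# Illustrative encodings produced by encode_int above: 5 encodes as the single
# byte chr(INT_POS_FIXED_START + 5); -3 as chr(INT_NEG_FIXED_START + 2); and
# 1000 falls through to the two-byte CHR_INT2 form via struct.pack('!h', 1000).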
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.extend({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.extend(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((chr(STR_FIXED_START + len(x)), x))
else:
r.extend((str(len(x)), ':', x))
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(chr(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x,r):
if len(x) < DICT_FIXED_COUNT:
r.append(chr(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[NoneType] = encode_none
lock = Lock()
try:
from types import BooleanType
encode_func[BooleanType] = encode_bool
except ImportError:
pass
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
lock.acquire()
try:
if float_bits == 32:
encode_func[FloatType] = encode_float32
elif float_bits == 64:
encode_func[FloatType] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
finally:
lock.release()
return ''.join(r)
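# Illustrative round trip: loads(dumps({'a': 1})) == {'a': 1}. Here the dict,
# its key and its value are all small enough to use the fixed-length typecodes
# defined above (DICT_FIXED_START, STR_FIXED_START, INT_POS_FIXED_START).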
def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
L = [[{'a':15, 'bb':f1, 'ccc':f2, '':[f3,[],False,True,'']},['a',10**20],list(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0]]
assert loads(dumps(L)) == L
d = dict(zip(range(-100000,100000),range(-100000,100000)))
d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False})
L = [d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''}]
assert loads(dumps(L)) == L
L = ['', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, 'a'*10000000]
assert loads(dumps(L)) == L
L = [dict(zip(range(n),range(n))) for n in range(100)] + ['b']
assert loads(dumps(L)) == L
L = [dict(zip(range(n),range(-n,0))) for n in range(100)] + ['b']
assert loads(dumps(L)) == L
L = [list(range(n)) for n in range(100)] + ['b']
assert loads(dumps(L)) == L
L = ['a'*n for n in range(1000)] + ['b']
assert loads(dumps(L)) == L
L = ['a'*n for n in range(1000)] + [None,True,None]
assert loads(dumps(L)) == L
assert loads(dumps(None)) == None
assert loads(dumps({None:None})) == {None:None}
assert 1e-10<abs(loads(dumps(1.1))-1.1)<1e-6
assert 1e-10<abs(loads(dumps(1.1,32))-1.1)<1e-6
assert abs(loads(dumps(1.1,64))-1.1)<1e-12
try:
import psyco
psyco.bind(dumps)
psyco.bind(loads)
except ImportError:
pass
if __name__ == '__main__':
test()
|
AppState
|
/AppState-0.1.tar.gz/AppState-0.1/rencode.py
|
rencode.py
|
=AppState=
==Application Shared State==
Copyright 2008, Nathan Whitehead
Released under Apache-2.0 and MIT licenses (see LICENSE section)
This module allows your Python programs to easily have a persistent
global shared state. This state can be used to store things like
number of users, game high scores, message of the day from the author,
etc. The state has an interface like a dictionary with some
additional synchronization functionality and some restrictions. This
module connects to a server on the Google App Engine to store and
manage data. It uses a simple security model based on Google
accounts.
Why would you want to use this module?
* Quickly add multiplayer features to your Pygame game
* Take advantage of Google's efficient and reliable infrastructure
(for free!)
* Super easy to get started and use
* Includes security features
==DOWNLOAD==
A source distribution is available at PyPI:
http://pypi.python.org/pypi/AppState
==INSTALLATION==
AppState is packaged as Python source using distutils. To install, run
the following command as root:
python setup.py install
For more information and options about using distutils, read:
http://docs.python.org/inst/inst.html
==DOCUMENTATION==
The documentation consists of this README and the
[http://www.paranoidbrain.com/appstate.html pydoc function
documentation].
==OVERVIEW==
The basic idea is that you have an application that runs on many
different computers, maybe simultaneously. You would like to share
data between instances of your application that are being run by
different users. This module implements a shared state space
that all the instances of your application can use to communicate
data safely.
Before the shared space can be used, the creator of the application
must register the application with the server. Once the application
is registered, users that are running an instance of your application
will connect to the server and join the shared application space.
They may also login using their Google account information. Once they
have joined, they may start reading and modifying the state. Changes
to the state are visible to all users.
The state itself is similar to a dictionary in python. Keys are
strings and values can be any python value that can be encoded by
rencode. These values are things like bools, ints, strings, floats,
lists, dictionaries with simple keys. It does NOT include
objects from user-defined classes.
Every call to query or update the distributed state requires an HTTP
connection to the Google App Engine server. For me this means that
every operation takes between 0.1 seconds and 3 seconds. Other people
may have a different experience with performance. In any case, the
shared state is not fast enough for real-time games. It MAY be fast
enough for turn-based games. It is more suitable for setting up a
message board to match players and exchange IP addresses for a
real-time online game, or for sharing game objects (e.g. custom
levels, achievements) between players.
The distributed state also does not currently have a push capability,
that is, there is no way for the server to send a message to an
application instance telling it that something has happened. The
server just waits for application instances to connect and query
the state.
==HOW TO USE==
===Creating a new app===
You need a Google account to register an application and be an admin.
If you do not have a Google account, sign up at:
https://www.google.com/accounts/Login
Every application has a unique application id. Every instance of your
application will use the same application id. Recommended naming
scheme for appid:
(your name OR your domain name) PLUS application name
So one of my hypothetical application ids is:
'NathanWhitehead+AstroMaxBlaster'
The basic steps are to connect to the server by creating a DistributedState
object, login, then call new_app().
{{{
state = DistributedState()
state.login('your@email', 'password')
state.new_app(appid, ANY, ANY)
}}}
The arguments to new_app() are your unique appid, read permissions,
and write permissions. Choices for permissions are:
* ANY - means anyone can do the operation, even if not logged in
* ADMIN_ONLY - only the admin can do the operation
* AUTHORIZED_ONLY - only users that have been explicitly authorized
can do the operation
* UNBANNED_ONLY - any logged in user that has not been banned can do
the operation
For example, setting readmode=ANY and writemode=ADMIN_ONLY means that
anyone can read the application state, but only the admin can make any
changes. You cannot change readmode or writemode once the application
has been registered. Only the admin can authorize users or ban them.
The only mode that allows users who are not logged in is ANY. The
other modes require the person to be logged in.
You only need to create the application object one time, it will
persist in the Google App Engine datastore until you delete it.
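For example (the appid below is illustrative), you could register an
application that anyone may read but only explicitly authorized users
may modify:
{{{
state = DistributedState()
state.login('your@email', 'password')
state.new_app('YourName+PrivateBoard', ANY, AUTHORIZED_ONLY)
}}}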
===Joining a state===
Once the application id has been registered, people can start joining
the shared application state. To join, they do:
{{{
state = DistributedState()
state.join(appid)
}}}
If the read and write modes require it, users might also need to
login after joining.
===Using the state===
The simplest way to use the distributed state object is to treat it
like a dictionary. You can store values into the state using:
{{{
state['key'] = value
}}}
The key must be a string of reasonable size without linebreaks. The
values you can store in the state are any python data value that can
be encoded by rencode. These values include bools, ints, strings,
floats, lists, dictionaries with simple keys. It does NOT include
objects from user-defined classes. The values must be 20K or less in
size after being serialized and encoded into a string. To check the
size of a value, use:
{{{
length_data(value)
}}}
To retrieve stored values, use:
{{{
state['key']
}}}
If no value has been stored under that key, this will raise a KeyError
exception.
You can delete keys using del:
{{{
del state['key']
}}}
===EXAMPLE: Message of the Day===
A very simple example is a 'message of the day' feature. The
application retrieves the state and displays it. Only the admin who
created the application can change the state, but anyone can read it.
The application is registered by the creator and the initial message
is set with:
{{{
import appstate
state = appstate.DistributedState()
state.login('your@email', 'password')
state.new_app('YourName+MessageOfTheDayForMyGreatApp',
readmode=appstate.ANY,
writemode=appstate.ADMIN_ONLY)
state['message'] = 'New version of MyGreatApp available today!'
}}}
The beginning of MyGreatApp will do the following:
{{{
import appstate
state = appstate.DistributedState()
state.join('YourName+MessageOfTheDayForMyGreatApp')
print state['message']
# Do the rest of MyGreatApp
}}}
When people run MyGreatApp they will see the latest message that you
have set. To change the message, you do:
{{{
import appstate
state = appstate.DistributedState()
state.login('your@email', 'password')
state.join('YourName+MessageOfTheDayForMyGreatApp')
state['message'] = 'If you liked MyGreatApp, you will love MyGreatestApp'
}}}
===Updating state===
Just using direct dictionary operations is not a good idea for
applications that will have more than one person updating values.
For example, suppose the state keeps track of a number representing
how many times the application has been run anywhere. When the
application starts up it might run something like
state['count'] = state['count'] + 1.
But what if two different people run the application at the same
time? Both of them evaluate state['count'] and get 102. They both
increment to get 103, then they both set the count to 103. But
that's wrong: the count should go to 104, since two copies of the
application started.
First register the counter testing application.
{{{
import appstate
state = appstate.DistributedState()
state.login('your@email', 'password')
state.new_app('YourName+CounterTest', appstate.ANY, appstate.ANY)
}}}
Now here's the BAD counting application:
{{{
import appstate
state = appstate.DistributedState()
state.join('YourName+CounterTest')
def incr_count():
    try:
        old = state['count']
        new = old + 1
        state['count'] = new
    except KeyError:
        state['count'] = 1
incr_count()
print state['count']
}}}
In Linux you can see the bad behavior by running two copies simultaneously:
{{{
python test/counter_bad.py & python test/counter_bad.py &
}}}
The solution is to use update(). When you call update() you give a
hash of the previous version of the value you are updating along with
your new value. If everything goes well the value will update. If
someone else has beaten you to changing the value, the hash you
passed will not match and the function will raise an
UpdateFailedError exception. You can then fetch the new value and try again.
For the example, both applications try to update from 102->103. One
of them will succeed but the other one has to fail. The one that
fails rereads the updated count (103), increments it to 104, and then
tries to update 103->104. If no one else has beaten the application
again then the update will succeed.
Here's the GOOD counting application:
{{{
import appstate
state = appstate.DistributedState()
state.join('YourName+CounterTest')
def incr_count():
    try:
        old = state['count']
        new = old + 1
        oldhash = appstate.hash_value(old)
        state.update('count', oldhash, new)
    except appstate.UpdateFailedError:
        incr_count() # try again
    except KeyError:
        state['count'] = 1
incr_count()
print state['count']
}}}
===State operations===
To help automate the process of using update() you can use apply_op().
This way you don't have to worry about catching exceptions and
retrying operations on the state. To use apply_op(), you define the
function to be applied to the stored value. The function you write
must take exactly one argument, the input value, and return one
value, the output value.
Some optional keyword arguments control how apply_op() deals with
missing keys: 'create' is a boolean that indicates to create
new values for the key, and 'defaultvalue' indicates what value
to use.
Here is the good counting example rewritten to use apply_op().
{{{
import appstate
state = appstate.DistributedState()
state.join('YourName+CounterTest')
def inc(x):
    return x + 1
state.apply_op('count', inc, create=True, defaultvalue=1)
print state['count']
}}}
===Optimized gets===
To save time and network traffic, you can request values from the
shared store and only get updated values when there is something new
to report. Use the get_if_changed() function to do this. To use it,
pass it the key and a hash of the value you already know about. The
function will either return the new current value that you don't know,
or it will raise a DataUnchangedError exception if there have not been
any changes.
Here's a code snippet that gets a list from the shared state:
{{{
lst = [1,2,3,4,5]
try:
    lst = state.get_if_changed('somenums', appstate.hash_value(lst))
except appstate.DataUnchangedError:
    pass
print lst
}}}
===Authorizing and banning users===
For many applications it is useful to be able to ban troublesome users
or limit changes to a small set of authorized users. Only the admin
can ban or authorize users.
In the mode AUTHORIZED_ONLY, every user that is allowed to read/write
must be explicitly authorized by the admin. Here's how the admin
authorizes a user:
{{{
import appstate
state = appstate.DistributedState()
state.login('admin@email', 'password')
state.join('appid')
state.authorize('otheruser@email')
}}}
Banning works similarly.
{{{
import appstate
state = appstate.DistributedState()
state.login('admin@email', 'password')
state.join('appid')
state.ban('malicious@email')
}}}
Note that in the mode AUTHORIZED_ONLY it doesn't matter if a user is
banned or not. It only matters whether they have been authorized. In
the mode UNBANNED_ONLY it does not matter whether a user has been
authorized, it only matters whether they have been banned.
To reverse an authorization or a ban, use unauthorize() or unban().
Only the admin can reverse an authorization or a ban.
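For example, lifting the ban from the previous snippet (a sketch
mirroring the examples above):
{{{
import appstate
state = appstate.DistributedState()
state.login('admin@email', 'password')
state.join('appid')
state.unban('malicious@email')
}}}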
===Sending email===
Not all players will be online at the same time. To let players that
are involved with the game but not currently playing know that something
has happened, this module can send emails.
To prevent abuse, any application instance that wishes to send an
email must be logged in to a Google account. The email sent will
always use the logged-in user's address as the sender.
To send an email message:
{{{
import appstate
state = appstate.DistributedState()
state.login('your@email', 'password')
state.join('YourName+GrueAttack')
msg = '''
I attacked you with a grue!
Continue playing GrueAttacker to counterattack.'''
state.email('buddys@email', 'Grue attack!', msg)
}}}
==COMMAND LINE==
As a convenience, admin operations are available through the command
line. This can be useful for scripting and testing.
{{{
Usage: python appstate.py command [args]
Commands:
version
new_app email password appid readmode writemode
delete_app email password appid
authorize youremail yourpassword appid subjemail
ban youremail yourpassword appid subjemail
unauthorize youremail yourpassword appid subjemail
unban youremail yourpassword appid subjemail
}}}
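For example, the counter test application from earlier could be
registered from the shell with (substitute your own credentials):
{{{
python appstate.py new_app your@email password YourName+CounterTest ANY ANY
}}}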
==SECURITY==
This module provides some rudimentary security features but you should
not use this module to create applications that require security
guarantees.
===Plaintext===
All messages except login information are transmitted in plain text.
This means that an adversary snooping network traffic can see the
information your application sends back and forth to the server and
potentially modify it. For example, this means that if you set your
security model to ADMIN_ONLY for writing, a malicious attacker may be
able to corrupt changes that are sent by the legitimate admin. The
attacker should not, however, be able to steal the login credentials
by sniffing network traffic and later masquerade as the admin.
Since python source can easily be modified, you cannot rely on every
instance of your application to behave nicely. For example, if your
application is a game and the shared state is a high score board, a
malicious user could alter the gameplay to automatically give himself
a high score. There is no good way to defend against this attack
short of securing the hardware used to run the application (this is
the model used by the XBox 360).
===Encoding values===
Encoding and decoding of python values in the shared state is done by
rencode. This means that if an attacker corrupts the data in the
shared state, you may get a ValueError exception when retrieving
information from the state. It should not be possible for corrupted
data to make your application segfault or execute arbitrary python
code.
Be careful how you interpret data in the shared state. Remember that
an attacker can change the data there. For example, it is a very
bad idea to store filenames in the shared state and then use those
filenames to read and write to the local filesystem. Instead, your
application should construct its own filenames and store data from
the shared state using those filenames.
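For instance (a sketch; 'savefile' and 'profile' are illustrative keys):
{{{
import os
# BAD: an attacker who can write the state controls this path
# f = open(state['savefile'], 'w')
# Better: construct the path locally, use state data only as contents
f = open(os.path.join('saves', 'profile.dat'), 'w')
f.write(str(state['profile']))
f.close()
}}}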
Never construct shell commands from data in the state. There are too
many ways that corrupted data can alter the effects of the shell
command.
Never use pickle to store and retrieve python values in the shared
state. Unpickling objects allows arbitrary python code to execute (by
design). If you absolutely need to store a user object in the shared
state, extend rencode to handle your object safely.
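For example, rather than pickling a user-defined object, flatten it
into plain values that rencode can encode (a sketch; the score object
is made up):
{{{
# score is an instance of your own class; store plain data instead
state['best'] = {'name': score.name, 'points': score.points}
# rebuild the object yourself after reading the dictionary back
}}}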
===User emails===
Google App Engine does not currently (as of 9/14/2008) have the ability
to uniquely identify users other than by email address. This means
that when you ban an email address, the same user can change their
email address to unban themselves. There is also no way to ban users
by IP address.
===Abuse===
Please do not use too much space without good reason; there is a 500MB
limit shared by all users of the server. There is no hard-coded size
limit per application, just don't be wasteful or I will delete your
application. Please do not try to crash the server and make Google
ban me from the App Engine. Be aware that I have private tools, not
included in this release, that monitor access to the server and
attempt to mitigate abuse.
If you discover a security problem with this package or encounter
problem behavior from attackers, please email me.
==PERFORMANCE==
Each access to the shared state requires communication with the Google
App Engine server. To minimize slowdowns, cache data values locally
whenever possible and prefer get_if_changed() over plain reads. When
writing values to the shared state, batch together as many changes as
possible. Instead of looping and calling apply_op() once per loop,
make the operation you are applying loop over the data and make the
changes. This way there will only be one call to apply_op().
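For instance, to add several new items to a shared list in one round
trip (a sketch; 'items' and newitems are illustrative):
{{{
newitems = ['a', 'b', 'c']
def add_all(lst):
    return lst + newitems
state.apply_op('items', add_all, create=True, defaultvalue=[])
}}}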
When deciding how to structure your shared data space, try to make a
good tradeoff between the number of keys to use and the size of the
values in each key. You want to use as few keys as possible to
minimize the number of round-trip calls between the application
instances and the server. On the other hand, you want the data stored
under each key to be as short as possible so that the messages that
are exchanged are as small as possible.
Another consideration is that atomic updates using update() or
apply_op() can only happen on one key at a time. If you split up data
into different keys, you can no longer guarantee the consistency of
the data when there are multiple concurrent writers. That is not
necessarily a bad thing, just something to be aware of.
On the flip side, if only one key is used then all accesses to the
central server will be serialized. If there are many simultaneous
users attempting to modify the state, each user will experience many
update() failures and each modification will take longer.
My advice is to start with all the state stored in one key and only
use update() or apply_op() to change the state. This makes reasoning
about what is happening simpler. Once the state starts including
auxiliary information that takes up too much space, start separating
out the bulky data into separate keys, with references in the main
state key. Auxiliary features that are independent of the main state
should also go into new keys (e.g. like adding a message board).
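A sketch of that layout (the key names are only illustrative):
{{{
# small, frequently-updated data lives in the main key
state['main'] = {'players': ['alice', 'bob'],
                 'board_key': 'board1'}
# bulky or independent data goes under its own keys,
# referenced from the main state
state['board1'] = ['first post', 'second post']
}}}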
==LICENSE==
This module is released under the Apache-2.0 license, the same license
that Google uses for their Google App Engine SDK. One file,
rencode.py, is distributed under the MIT license. Both licenses are
compatible with the LGPL used by Pygame.
===AppState excluding rencode.py===
Copyright 2008 Nathan Whitehead
Copyright 2007 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You may
obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
===rencode.py===
Modifications by Nathan Whitehead 2008 released to public domain.
Modifications by Connelly Barnes 2006-2007 released to public domain.
Licensed under the "MIT license":
Copyright (C) 2001-2002 Bram Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
The Software is provided "AS IS", without warranty of any kind,
express or implied, including but not limited to the warranties of
merchantability, fitness for a particular purpose and
noninfringement. In no event shall the authors or copyright holders be
liable for any claim, damages or other liability, whether in an action
of contract, tort or otherwise, arising from, out of or in connection
with the Software or the use or other dealings in the Software.
(The rencode module is licensed under the above license as well).
==KNOWN BUGS==
None so far.
==THE FUTURE==
The server does not currently have any way of notifying clients that a
client has updated something. I'm working on nice ways to do this
that don't bog down the server too much and are convenient for python
programmers.
Another thing I'm thinking about is a little library that does about
the same thing as AppState but is designed in a self-contained way.
Instead of using Google App Engine as the server, the first person to
start the game would be the server. This might be fast enough for
realtime multiplayer (but there are a lot of issues to work out).
|
AppState
|
/AppState-0.1.tar.gz/AppState-0.1/README
|
README
|
__author__ = 'Nathan Whitehead'
__version__ = '0.1'
import sys
import md5
import rencode
import prpc
class InvalidAddressError(Exception): pass
class SizeError(Exception): pass
class DataUnchangedError(Exception): pass
class DuplicateError(Exception): pass
class PermissionError(Exception): pass
class UnjoinedError(Exception): pass
class LoginError(Exception): pass
class AppIdError(Exception): pass
class UpdateFailedError(Exception): pass
class UnexpectedError(Exception): pass
ANY = 0
ADMIN_ONLY = 1
AUTHORIZED_ONLY = 2
UNBANNED_ONLY = 3
# Utility functions
def hash(x):
return md5.new(x).hexdigest()
def serialize(x):
return rencode.dumps(x)
def unserialize(s):
return rencode.loads(s)
def length_data(x):
return len(serialize(x))
def hash_value(x):
return hash(serialize(x))
# Main class
class DistributedState():
'''Distributed Persistent Data Store'''
def __init__(self, server='pygameserver.appspot.com'):
'''Create new connection to server
This function establishes a connection to Google App Engine.
Server will normally be pygameserver.appspot.com
'''
self.appid = None
self.appkey = None
self.serv = prpc.PRPC(hostname=server)
self.joined = False
def join(self, appid = None):
'''Join an existing application
Establishes connection to the pygame application distributed
state using application id (appid). Will raise AppIdError if
the appid does not exist.
'''
if appid is not None: self.appid = appid
self.appkey = self.serv.send('getapp', self.appid)
if self.appkey[:10] == '!!!!!appid': raise AppIdError
if self.appkey[:5] == '!!!!!': raise UnexpectedError
self.joined = True
# Administrative functions
def version(self):
'''Get version string returned by server'''
resp = self.serv.send('version')
if resp[:5] == '!!!!!': raise UnexpectedError
return resp
def login(self, email, password):
'''Login to Google account
Calling this function logs into a Google account with the
given email and password. Will raise LoginError if there is a
problem. Check the 'reasons' field of the exception to see
why the login was denied if there was an exception raised.
Your account must already exist and be in good standing. To
create new accounts or manage problems with your existing
account, go to:
https://www.google.com/accounts/Login
If you have been locked out of your account for attempting to
login too quickly with incorrect passwords, unlock the account
by going to:
https://www.google.com/accounts/DisplayUnlockCaptcha
'''
try:
self.serv.login(email, password)
except prpc.ClientLoginError, e:
e2 = LoginError()
e2.reason = e.reason
raise e2
def new_app(self, appid, readmode=ANY, writemode=ANY):
'''Register a new application
You must be logged in. Your account will be the admin
account for the application. After creating the app,
joins it.
Each application must have a unique id. Your appid is shared
between all instances of your application that are running.
Recommended naming scheme for appids:
(your name OR your domain name) PLUS application name
So one of my hypothetical application ids is:
NathanWhitehead+AstroMaxBlaster
The readmode and writemode arguments indicate how permissions
work. They can be the following values:
ANY - means anyone can do the operation, even if not logged in
ADMIN_ONLY - only the admin can do the operation
AUTHORIZED_ONLY - only users that have been explicitly
authorized can do the operation
UNBANNED_ONLY - any logged in user that has not been banned
can do the operation
For example, setting readmode=ANY and writemode=ADMIN_ONLY
means that anyone can read the application state, but only the
admin can make any changes. You cannot change readmode or
writemode once the application has been registered. Only the
admin can authorize users or ban them.
Will raise PermissionError if you are not logged in. Will raise
DuplicateError if the application id is already used.
'''
self.appid = appid
resp = self.serv.send('registerapp', appid, readmode, writemode)
if resp[:9] == '!!!!!must': raise PermissionError
if resp[:10] == '!!!!!appid': raise DuplicateError
if resp[:5] == '!!!!!': raise UnexpectedError
self.join()
def delete_app(self):
'''Delete the application
You must be logged in and have joined the application. Your
account must be the admin account for the application. Cannot
be undone.
Will raise PermissionError if you are not logged in and the
application admin. Will raise UnjoinedError if you have not
joined the application.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('deleteapp', self.appkey)
if resp[:8] == '!!!!!you': raise PermissionError
if resp[:9] == '!!!!!must': raise PermissionError
if resp[:5] == '!!!!!': raise UnexpectedError
self.appkey = None
self.appid = None
self.joined = False
def authorize(self, email):
'''Authorize a user
You must be logged in as administrator and have joined the
application. Note that in some read and write modes,
authorizing users has no effect.
Raises UnjoinedError if you haven't joined an application
state. Raises DuplicateError if the email has already been
authorized. Raises PermissionError if you are not logged in
as admin.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('authorize', self.appkey, email)
if resp[:12] == '!!!!!already': raise DuplicateError
if resp[:8] == '!!!!!you': raise PermissionError
if resp[:5] == '!!!!!': raise UnexpectedError
def unauthorize(self, email):
'''Unauthorize a user
You must be logged in as administrator and have joined the
application. Note that in some read and write modes,
authorizing users has no effect.
Raises UnjoinedError if you haven't joined an application
state. Raises DuplicateError if the email is not authorized.
Raises PermissionError if you are not logged in as admin.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('unauthorize', self.appkey, email)
if resp[:8] == '!!!!!not': raise DuplicateError
if resp[:8] == '!!!!!you': raise PermissionError
if resp[:5] == '!!!!!': raise UnexpectedError
def ban(self, email):
'''Ban a user
You must be logged in as administrator and have joined the
application. Note that in some read and write modes,
banning users has no effect.
Raises UnjoinedError if you haven't joined an application
state. Raises DuplicateError if the email has already been
banned. Raises PermissionError if you are not logged in as
admin.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('ban', self.appkey, email)
if resp[:12] == '!!!!!already': raise DuplicateError
if resp[:8] == '!!!!!you': raise PermissionError
if resp[:5] == '!!!!!': raise UnexpectedError
def unban(self, email):
'''Unban a user
You must be logged in as administrator and have joined the
application. Note that in some read and write modes,
banning users has no effect.
Raises UnjoinedError if you haven't joined an application
state. Raises DuplicateError if the email is not banned.
Raises PermissionError if you are not logged in as admin.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('unban', self.appkey, email)
if resp[:8] == '!!!!!not': raise DuplicateError
if resp[:8] == '!!!!!you': raise PermissionError
if resp[:5] == '!!!!!': raise UnexpectedError
def email(self, addr, subj, body):
'''Send email
You must be logged in and have joined the application.
The destination address must be a valid email address.
Raises UnjoinedError if you have not joined the application.
Raises PermissionError if you are not logged in.
Raises InvalidAddressError if the address is invalid.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('email', self.appkey, addr, subj, body)
if resp[:8] == '!!!!!you': raise PermissionError
if resp[:12] == '!!!!!invalid': raise InvalidAddressError
if resp[:5] == '!!!!!': raise UnexpectedError
# Direct access to persistent global state of application
def __getitem__(self, key):
'''Retrieve the most current value for the given key
Will raise KeyError if there have not been any calls setting
the value of the key. Will raise PermissionError if you do
not have permission to read the key value. May raise other
various exceptions if the connection times out, if the server
reports a problem, or if the application data gets corrupted.
The return value will be Python data.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('get', self.appkey, key)
if resp[:7] == '!!!!!no': raise PermissionError
if resp[:8] == '!!!!!key': raise KeyError
if resp[:5] == '!!!!!': raise UnexpectedError
return unserialize(resp)
def get_if_changed(self, key, oldhash):
'''Retrieve the value for the given key if it has changed
You pass a key and the hash value that you already know about,
and the server will either send you the most current value
that has a different hash, or raise DataUnchangedError if
there are no changes to report.
Will raise KeyError if there have not been any calls setting
the value of the key. Will raise PermissionError if you do
not have permission to read the key value. May raise other
various exceptions if the connection times out, if the server
reports a problem, or if the application data gets corrupted.
The return value will be Python data.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('getifchanged', self.appkey, key, oldhash)
if resp[:7] == '!!!!!no': raise PermissionError
if resp[:8] == '!!!!!key': raise KeyError
if resp[:9] == '!!!!!hash': raise DataUnchangedError
if resp[:5] == '!!!!!': raise UnexpectedError
return unserialize(resp)
def __setitem__(self, key, value):
'''Set the value for a given key
This function accepts any Python data for the value that is not
too big. The size of the data when serialized and encoded is
limited to 20K.
Note that this function does not care what the previous state
of the application was. Other copies of the application may
have already updated the value by the time you call set(). It
is recommended to use update() rather than this function.
Will raise UnjoinedError if you have not joined the
application. Will raise PermissionError if you do not have
permission to write to the state. Will raise SizeError if the
value is too big. May raise various exceptions if the
connection times out, if the server reports a problem, or if
the application state data gets corrupted.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('set', self.appkey, key, serialize(value))
if resp[:7] == '!!!!!no': raise PermissionError
if resp[:8] == '!!!!!too': raise SizeError
if resp[:5] == '!!!!!': raise UnexpectedError
def __delitem__(self, key):
'''Delete the value for a given key
Will raise PermissionError if you do not have permission to
write to the state. Will raise KeyError if the key has no
value to delete.
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('del', self.appkey, key)
if resp[:7] == '!!!!!no': raise PermissionError
if resp[:8] == '!!!!!key': raise KeyError
if resp[:5] == '!!!!!': raise UnexpectedError
# Synchronized access functions
def update(self, key, oldhash, value):
'''Update the value associated with a given key
This function checks that the current value matches the given
hash value, then updates the value associated with the key.
The size of the new value when serialized and encoded is
limited to 20K. If the hash value you give does not match
the hash of the current value, this function will raise
UpdateFailedError.
To calculate the hash of a value v, use: hash_value(v)
'''
if not self.joined: raise UnjoinedError
resp = self.serv.send('update', self.appkey, key, oldhash, serialize(value))
if resp[:12] == '!!!!!no perm': raise PermissionError
if resp[:8] == '!!!!!too': raise SizeError
if resp[:11] == '!!!!!no val': raise KeyError
if resp[:9] == '!!!!!hash': raise UpdateFailedError
if resp[:5] == '!!!!!': raise UnexpectedError
def apply_op(self, key, func, create=False, defaultvalue=None):
'''Apply a function to the value stored at a key
The function func must take one argument and return one
argument. The state will be updated to reflect applying the
function to the value stored at the given key. The function
may be called more than once if the value is being changed
by other instances of the application. To make debugging
easier, try to limit side effects in the function.
If the key has no value, and create is false, will raise
a KeyError exception. If create is true, the function
will apply to the given defaultvalue.
This function attempts to guarantee that even if many
instances are simultaneously attempting to update the same
value in the state, all the functions will be applied in some
order. For example, if the value is a list and the operation
is inserting an element into the list, using this method will
guarantee that all elements will be inserted.
This function cannot guarantee that two instances will not
simultaneously create a new value. If you need absolute
consistency in this case, create default values in the state
before distributing multiple instances of the application.
'''
try:
old = self[key]
new = func(old)
oldhash = hash_value(old)
self.update(key, oldhash, new)
except UpdateFailedError:
self.apply_op(key, func, create=create, defaultvalue=defaultvalue)
except KeyError:
if create:
self[key] = func(defaultvalue)
else:
raise KeyError
#SERVER = 'localhost:8080'
SERVER = 'pygameserver.appspot.com'
if __name__ == '__main__':
if len(sys.argv) <= 1:
print '''
Usage: python %s command [args]
Commands:
version
new_app email password appid readmode writemode
delete_app email password appid
authorize youremail yourpassword appid subjemail
ban youremail yourpassword appid subjemail
unauthorize youremail yourpassword appid subjemail
unban youremail yourpassword appid subjemail
''' % sys.argv[0]
sys.exit()
cmd = sys.argv[1]
ds = DistributedState()
if cmd == 'version':
print ds.version()
sys.exit()
if cmd == 'new_app':
ds.login(sys.argv[2], sys.argv[3])
modes = {'ANY' : ANY,
'ADMIN_ONLY' : ADMIN_ONLY,
'AUTHORIZED_ONLY' : AUTHORIZED_ONLY,
'UNBANNED_ONLY' : UNBANNED_ONLY,
str(ANY) : ANY,
str(ADMIN_ONLY) : ADMIN_ONLY,
str(AUTHORIZED_ONLY) : AUTHORIZED_ONLY,
str(UNBANNED_ONLY) : UNBANNED_ONLY,
}
appid = sys.argv[4]
readmode = modes[sys.argv[5]]
writemode = modes[sys.argv[6]]
ds.new_app(appid, readmode, writemode)
sys.exit()
if cmd == 'delete_app':
ds.login(sys.argv[2], sys.argv[3])
appid = sys.argv[4]
ds.join(appid)
ds.delete_app()
sys.exit()
if cmd == 'ban':
ds.login(sys.argv[2], sys.argv[3])
ds.join(sys.argv[4])
ds.ban(sys.argv[5])
sys.exit()
if cmd == 'unban':
ds.login(sys.argv[2], sys.argv[3])
ds.join(sys.argv[4])
ds.unban(sys.argv[5])
sys.exit()
if cmd == 'authorize':
ds.login(sys.argv[2], sys.argv[3])
ds.join(sys.argv[4])
ds.authorize(sys.argv[5])
sys.exit()
if cmd == 'unauthorize':
ds.login(sys.argv[2], sys.argv[3])
ds.join(sys.argv[4])
ds.unauthorize(sys.argv[5])
sys.exit()
print 'Unknown command %s\n' % cmd
|
AppState
|
/AppState-0.1.tar.gz/AppState-0.1/appstate.py
|
appstate.py
|
import md5
import logging
from google.appengine.api import users
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
VERSION = 'Version 0.4'
# How big can one entry in application table be?
SINGLE_SIZE_LIMIT = 32000
def hash(x): return md5.new(x).hexdigest()
class Application(db.Model):
'''Information about a client application'''
appid = db.StringProperty(multiline=False)
admin = db.UserProperty()
readmode = db.IntegerProperty()
# 0 = anyone can read
# 1 = only admin can read
# 2 = anyone logged in, authorized, can read
# 3 = anyone logged in, not banned, can read
writemode = db.IntegerProperty()
# 0-3, same as readmode but for writing
class AppDataInstance(db.Model):
appref = db.ReferenceProperty(Application)
shelfkey = db.StringProperty(multiline=False)
shelfdata = db.BlobProperty()
datalen = db.IntegerProperty()
who = db.UserProperty()
class AuthorizedUser(db.Model):
appref = db.ReferenceProperty(Application)
who = db.UserProperty()
class BannedUser(db.Model):
appref = db.ReferenceProperty(Application)
who = db.UserProperty()
# Functions that benefit from being cached
def lookup_app(appkey):
data = memcache.get(appkey)
if data is not None: return data
try:
data = db.get(db.Key(appkey))
if not memcache.add(appkey, data, 60 * 60 ): # 1 hour expire time
logging.error('memcache add() failed for appkey')
return data
except:
return None
def can_do(appl, mode, who):
if mode == 0: return True
if mode == 1: return (who == appl.admin)
if mode == 2:
auth = AuthorizedUser.all().filter('appref =', appl).filter('who =', who).get()
return (auth is not None)
if mode == 3:
ban = BannedUser.all().filter('appref =', appl).filter('who =', who).get()
return (ban is None)
return False
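# permission results are cached in memcache under 'R<appkey>:<user>' and 'W<appkey>:<user>' with a ten minute expiry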
def can_read(appl, who):
memcachekey = 'R' + str(appl.key()) + ':' + str(who)
data = memcache.get(memcachekey)
if data is not None: return data
data = can_do(appl, appl.readmode, who)
if not memcache.add(memcachekey, data, 60 * 10):
logging.error('memcache add() failed for read perm')
return data
def can_write(appl, who):
memcachekey = 'W' + str(appl.key()) + ':' + str(who)
data = memcache.get(memcachekey)
if data is not None: return data
data = can_do(appl, appl.writemode, who)
if not memcache.add(memcachekey, data, 60 * 10):
logging.error('memcache add() failed for write perm')
return data
# Process requests
def Process(cmd, arg1, arg2, arg3, arg4):
'''Process PythonRemoteProcedureCall request'''
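# error responses are plain strings prefixed with '!!!!!'; the client library matches on these prefixes to raise exceptions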
user = users.get_current_user()
if cmd == 'version':
return VERSION
if cmd == 'registerapp':
appid = arg1
# Make sure logged in
if user is None:
return '!!!!!must be logged in'
# Check if appid is already in use
prevapp = Application.all().filter('appid =', appid).get()
if prevapp is not None:
return '!!!!!appid in use'
app = Application()
app.appid = arg1
app.admin = user
app.readmode = int(arg2)
app.writemode = int(arg3)
app.put()
return 'OK'
if cmd == 'deleteapp':
appkey = arg1
# Make sure logged in
if user is None:
return '!!!!!must be logged in'
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if user != appl.admin:
return '!!!!!you must be admin'
appl.delete()
return 'OK'
if cmd == 'getapp':
appid = arg1
# Retrieve key of application
app = Application.all().filter('appid =', appid).get()
if app is None:
return '!!!!!appid not found'
return str(app.key())
if cmd == 'authorize':
appkey = arg1
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if user != appl.admin:
return '!!!!!you must be admin'
auser = users.User(arg2)
if auser is None:
# Currently this doesn't happen, invalid emails
# create ghost User objects
return '!!!!!user email not found'
prevauth = AuthorizedUser.all().filter('appref =', appl).filter('who =', auser).get()
if prevauth is not None:
return '!!!!!already authorized'
authuser = AuthorizedUser(appref=appl, who=auser)
authuser.put()
# Clear permissions cache
memcache.delete('R' + str(appl.key()) + ':' + str(auser))
memcache.delete('W' + str(appl.key()) + ':' + str(auser))
return 'OK'
if cmd == 'unauthorize':
appkey = arg1
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if user != appl.admin:
return '!!!!!you must be admin'
auser = users.User(arg2)
if auser is None:
# Currently this doesn't happen, invalid emails
# create ghost User objects
return '!!!!!user email not found'
prevauth = AuthorizedUser.all().filter('appref =', appl).filter('who =', auser).get()
if prevauth is None:
return '!!!!!not already authorized'
prevauth.delete()
# Clear permissions cache
memcache.delete('R' + str(appl.key()) + ':' + str(auser))
memcache.delete('W' + str(appl.key()) + ':' + str(auser))
return 'OK'
if cmd == 'ban':
appkey = arg1
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if user != appl.admin:
return '!!!!!you must be admin'
auser = users.User(arg2)
if auser is None:
# Currently this doesn't happen, invalid emails
# create ghost User objects
return '!!!!!user email not found'
prevban = BannedUser.all().filter('appref =', appl).filter('who =', auser).get()
if prevban is not None:
return '!!!!!already banned'
banuser = BannedUser(appref=appl, who=auser)
banuser.put()
# Clear permissions cache
memcache.delete('R' + str(appl.key()) + ':' + str(auser))
memcache.delete('W' + str(appl.key()) + ':' + str(auser))
return 'OK'
if cmd == 'unban':
appkey = arg1
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if user != appl.admin:
return '!!!!!you must be admin'
auser = users.User(arg2)
if auser is None:
# Currently this doesn't happen, invalid emails
# create ghost User objects
return '!!!!!user email not found'
prevban = BannedUser.all().filter('appref =', appl).filter('who =', auser).get()
if prevban is None:
return '!!!!!not banned'
prevban.delete()
# Clear permissions cache
memcache.delete('R' + str(appl.key()) + ':' + str(auser))
memcache.delete('W' + str(appl.key()) + ':' + str(auser))
return 'OK'
if cmd == 'get':
appkey = arg1
shelfkey = arg2
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if not can_read(appl, user):
return '!!!!!no permission to read'
# First check the cache
memcachekey = 'K' + str(appl.key()) + ':' + shelfkey
data = memcache.get(memcachekey)
if data is not None: return data
# Not in cache, do a query
appinst = AppDataInstance.all().filter('appref =', appl).filter('shelfkey =', shelfkey).get()
if appinst is None:
return '!!!!!keyerror'
else: data = appinst.shelfdata
if not memcache.add(memcachekey, data, 60 * 60):
logging.error('error adding memcache in get()')
return data
if cmd == 'getifchanged':
appkey = arg1
shelfkey = arg2
oldhash = arg3
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if not can_read(appl, user):
return '!!!!!no permission to read'
# First check the cache
memcachekey = 'K' + str(appl.key()) + ':' + shelfkey
data = memcache.get(memcachekey)
if data is not None:
if oldhash == hash(data):
return '!!!!!hash match'
return data
# Not in cache, do a query
appinst = AppDataInstance.all().filter('appref =', appl).filter('shelfkey =', shelfkey).get()
if appinst is None:
return '!!!!!keyerror'
else: data = appinst.shelfdata
if not memcache.add(memcachekey, data, 60 * 60):
logging.error('error adding memcache in get()')
if oldhash == hash(data):
return '!!!!!hash match'
return data
if cmd == 'set':
appkey = arg1
shelfkey = arg2
shelfdata = arg3
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if not can_write(appl, user):
return '!!!!!no permission to write'
if len(shelfdata) > SINGLE_SIZE_LIMIT:
return '!!!!!too big'
appinst = AppDataInstance.all().filter('appref =', appl).filter('shelfkey =', shelfkey).get()
if appinst is None:
appinst = AppDataInstance()
appinst.appref = appl
appinst.shelfkey = shelfkey
appinst.shelfdata = shelfdata
appinst.datalen = len(shelfdata)
appinst.who = user
appinst.put()
memcachekey = 'K' + str(appl.key()) + ':' + shelfkey
memcache.delete(memcachekey)
return 'OK'
if cmd == 'del':
appkey = arg1
shelfkey = arg2
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if not can_write(appl, user):
return '!!!!!no permission to write'
appinst = AppDataInstance.all().filter('appref =', appl).filter('shelfkey =', shelfkey).get()
if appinst is None:
return '!!!!!keyerror'
appinst.delete()
memcachekey = 'K' + str(appl.key()) + ':' + shelfkey
memcache.delete(memcachekey)
return 'OK'
if cmd == 'update':
appkey = arg1
shelfkey = arg2
oldhash = arg3
shelfdata = arg4
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if not can_write(appl, user):
return '!!!!!no permission to write'
if len(shelfdata) > SINGLE_SIZE_LIMIT:
return '!!!!!too big'
appinst = AppDataInstance.all().filter('appref =', appl).filter('shelfkey =', shelfkey).get()
if appinst is None:
return '!!!!!no value'
if oldhash != hash(appinst.shelfdata):
return '!!!!!hash mismatch'
appinst.shelfdata = shelfdata
appinst.datalen = len(shelfdata)
appinst.who = user
appinst.put()
memcachekey = 'K' + str(appl.key()) + ':' + shelfkey
memcache.delete(memcachekey)
return 'OK'
if cmd == 'memcache':
stats = memcache.get_stats()
return '%d hits\n%d misses\n' % (stats['hits'], stats['misses'])
if cmd == 'email':
appkey = arg1
addr = arg2
subj = arg3
body = arg4
appl = lookup_app(appkey)
if appl is None:
return '!!!!!appkey not found'
if not user:
return '!!!!!you must be logged in'
if not mail.is_email_valid(addr):
return '!!!!!invalid address'
mail.send_mail(user.email(), addr, subj, body)
return 'OK'
return '!!!!!unknown command'
class Prpc(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('ERROR\nMust use POST method\n')
def post(self):
self.response.headers['Content-Type'] = 'text/plain'
cmd = self.request.get('cmd')
arg1 = self.request.get('arg1')
arg2 = self.request.get('arg2')
arg3 = self.request.get('arg3')
arg4 = self.request.get('arg4')
#try:
resp = Process(cmd, arg1, arg2, arg3, arg4)
#except:
# self.response.out.write('!!!!!process')
# return
self.response.out.write(resp)
class Version(webapp.RequestHandler):
def get(self):
self.response.out.write(VERSION + '<br>\n')
def post(self):
self.response.out.write(VERSION + '<br>\n')
application = webapp.WSGIApplication([
('/prpc', Prpc),
('/version', Version),
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
AppState
|
/AppState-0.1.tar.gz/AppState-0.1/server/server.py
|
server.py
|
# AppTrack
#### Introduction
An open-source distributed application tracing system. It currently supports Flask web applications as well as distributed computing systems.
#### Software Architecture
Software architecture description
#### Installation
1. pip install apptrack
2. xxxx
3. xxxx
#### Usage
1. xxxx
2. xxxx
3. xxxx
#### Contributing
1. Fork this repository
2. Create a Feat_xxx branch
3. Commit your code
4. Open a Pull Request
#### Gitee Tips
1. Use Readme\_XXX.md to provide READMEs in different languages, e.g. Readme\_en.md, Readme\_zh.md
2. The official Gitee blog: [blog.gitee.com](https://blog.gitee.com)
3. Visit [https://gitee.com/explore](https://gitee.com/explore) to discover excellent open-source projects on Gitee
4. [GVP](https://gitee.com/gvp), short for Gitee's Most Valuable Projects, is a curated collection of outstanding open-source projects
5. The official Gitee user manual: [https://gitee.com/help](https://gitee.com/help)
6. Gitee Cover People is a column showcasing Gitee members: [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
|
AppTrack
|
/AppTrack-1.0.tar.gz/AppTrack-1.0/README.md
|
README.md
|
from __future__ import absolute_import, unicode_literals, print_function
from . import __version__
import six
# Max number of bits to use when generating random ID
MAX_ID_BITS = 64
# How often remotely controller sampler polls for sampling strategy
DEFAULT_SAMPLING_INTERVAL = 60
# How often remote reporter does a preemptive flush of its buffers
DEFAULT_FLUSH_INTERVAL = 1
# Name of the HTTP header used to encode trace ID
TRACE_ID_HEADER = 'apptrack-trace-id' if six.PY3 else b'apptrack-trace-id'
# Prefix for HTTP headers used to record baggage items
BAGGAGE_HEADER_PREFIX = 'apptrackctx-' if six.PY3 else b'apptrackctx-'
# The name of HTTP header or a TextMap carrier key which, if found in the
# carrier, forces the trace to be sampled as "debug" trace. The value of the
# header is recorded as the tag on the root span, so that the trace can
# be found in the UI using this value as a correlation ID.
DEBUG_ID_HEADER_KEY = 'apptrack-debug-id'
APPTRACK_CLIENT_VERSION = 'Python2-v%s' % __version__
# Tracer-scoped tag that tells the version of Algo client library
APPTRACK_VERSION_TAG_KEY = 'apptrack.version'
# Tracer-scoped tag that contains the hostname
APPTRACK_HOSTNAME_TAG_KEY = 'apptrack.hostname'
APPTRACK_IPV4_TAG_KEY = 'apptrack.ip_v4'
# the type of sampler that always makes the same decision.
SAMPLER_TYPE_CONST = 'const'
# the type of sampler that polls Algo agent for sampling strategy.
SAMPLER_TYPE_REMOTE = 'remote'
# the type of sampler that samples traces with a certain fixed probability.
SAMPLER_TYPE_PROBABILISTIC = 'probabilistic'
# the type of sampler that samples only up to a fixed number
# of traces per second.
# noinspection SpellCheckingInspection
SAMPLER_TYPE_RATE_LIMITING = 'ratelimiting'
# the type of sampler that samples only up to a fixed number
# of traces per second.
# noinspection SpellCheckingInspection
SAMPLER_TYPE_LOWER_BOUND = 'lowerbound'
# max length for tag values. Longer values will be truncated.
MAX_TAG_VALUE_LENGTH = 1024
# Constant for sampled flag
SAMPLED_FLAG = 0x01
# Constant for debug flag
DEBUG_FLAG = 0x02
DATABASE_HOST_KEY = 'db.host'
DATABASE_PORT_KEY = 'db.port'
GEN_ID_RANGE = 999999
# ---------------------------------------------------------------------------
# Genetalks Cloud
# ---------------------------------------------------------------------------
USER_TOKEN = 'user_token'
PUBLIC_KEY_FOR_TOKEN = 'public_key_for_token'
PROCESS_ID = 'process_id'
PROCESS_ARGS = 'process_args'
SPAN_ID = 'span_id'
START_TIME = 'start_time'
FINISH_TIME = 'finish_time'
FINISH_INFO = 'finish_info'
FINISH_STATUS = 'finish_status'
FINISH_REASON = 'finish_reason'
CHILDREN_DIR = 'children'
CHILDREN_NUM_ALLOCATOR = "children_num_allocator"
FINISH_STATUS_SUCC = "ok"
FINISH_STATUS_FAILED = "failed"
FINISH_STATUS_RETRY = "retry"
NOOP_SPAN_ID = 'span_noop'
FINISH_RESULT = 'finish_result' # this tag's value must be { bool is_success, string result_info }
|
AppTrack
|
/AppTrack-1.0.tar.gz/AppTrack-1.0/apptrack/constants.py
|
constants.py
|
from __future__ import absolute_import
import six
from collections import namedtuple
import json
import struct
import time
import os
import random
import opentracing
from opentracing import Reference, ReferenceType, Format, UnsupportedFormatException, SpanContextCorruptedException
from opentracing.ext import tags as ext_tags
from . import db
from . import constants
from .codecs import TextCodec, BinaryCodec
from .span import Span, SAMPLED_FLAG, DEBUG_FLAG
from .span_context import SpanContext
from .constants import START_TIME, SPAN_ID, MAX_ID_BITS, GEN_ID_RANGE
from .metrics import Metrics, LegacyMetricsFactory
import md5
def msg_id(msg=''):
if not isinstance(msg, str) or msg=='':
return None
d=md5.new(msg).hexdigest()
return int(d[:6],16)
class Tracer(opentracing.Tracer):
def __init__(self, service_name, reporter, sampler, metrics=None,
metrics_factory=None,
trace_id_header=constants.TRACE_ID_HEADER,
baggage_header_prefix=constants.BAGGAGE_HEADER_PREFIX,
debug_id_header=constants.DEBUG_ID_HEADER_KEY,
tags=None,
max_tag_value_length=constants.MAX_TAG_VALUE_LENGTH,
):
self.service_name = service_name
self.reporter = reporter
self.sampler = sampler
self.metrics_factory = metrics_factory or LegacyMetricsFactory(metrics or Metrics())
self.metrics = TracerMetrics(self.metrics_factory)
self.debug_id_header = debug_id_header
self.max_tag_value_length = max_tag_value_length
self.random = random.Random(time.time() * (os.getpid() or 1))
self.codecs = {
Format.TEXT_MAP: TextCodec(
url_encoding=False,
trace_id_header=trace_id_header,
baggage_header_prefix=baggage_header_prefix,
debug_id_header=debug_id_header,
),
Format.HTTP_HEADERS: TextCodec(
url_encoding=True,
trace_id_header=trace_id_header,
baggage_header_prefix=baggage_header_prefix,
debug_id_header=debug_id_header,
),
Format.BINARY: BinaryCodec(),
}
self.tags = {
constants.APPTRACK_VERSION_TAG_KEY: constants.APPTRACK_CLIENT_VERSION,
}
if tags:
self.tags.update(tags)
self.trace_id=msg_id(self.service_name)
def start_span(self, operation_name, child_of=None, references=None, tags=None,start_time=None,log_rings=[]):
parent = child_of
if references:
if isinstance(references, list):
# TODO only the first reference is currently used
references = references[0]
parent = references.referenced_context
# allow Span to be passed as reference, not just SpanContext
if isinstance(parent, Span):
parent = parent.context
tags = tags or {}
if parent is None or parent.referenced_context.is_debug_id_container_only:
trace_id = self.trace_id if self.trace_id else self.random_id()
span_id = trace_id
parent_id = None
flags = 0
baggage = None
if parent is None:
sampled, sampler_tags = \
self.sampler.is_sampled(trace_id, operation_name)
if sampled:
flags = SAMPLED_FLAG
for k, v in six.iteritems(sampler_tags):
tags[k] = v
else: # have debug id
flags = SAMPLED_FLAG | DEBUG_FLAG
tags[self.debug_id_header] = parent.debug_id
else:
trace_id = parent.referenced_context.trace_id
# the parent span and the child span must have the same trace_id
assert(str(self.trace_id) == str(trace_id))
parent_id = parent.referenced_context.span_id
flags = parent.referenced_context.flags
baggage = dict(parent.referenced_context.baggage)
if isinstance(references, Reference):
if references.type == ReferenceType.FOLLOWS_FROM:
parent_id = parent.parent_id
span_id=self.gen_id()
span_ctx = SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=parent_id, flags=flags,
baggage=baggage)
tags.update(self.tags)
span = Span(context=span_ctx, tracer=self, operation_name=operation_name,log_rings=log_rings,tags=tags)
if operation_name is not None:
span.set_operation_name(operation_name)
span.start()
return span
def inject(self, span_context, format, carrier):
codec = self.codecs.get(format, None)
if codec is None:
raise UnsupportedFormatException(format)
if isinstance(span_context, Span):
# be flexible and allow Span as argument, not only SpanContext
span_context = span_context.context
if not isinstance(span_context, SpanContext):
raise ValueError(
'Expecting Apptrack SpanContext, not %s' % type(span_context))
codec.inject(span_context=span_context, carrier=carrier)
def extract(self, format, carrier):
codec = self.codecs.get(format, None)
if codec is None:
raise UnsupportedFormatException(format)
context = codec.extract(carrier)
if context is not None:
# the extracted span_id is an integer and must be converted to the form stored in the database
context.span_id = self.form_span_id(context.span_id)
assert(str(self.trace_id) == str(context.trace_id))
return context
def report_span(self, span):
self.reporter.report_span(span)
def finish_span(self,span):
self.reporter.finish_span(span)
def random_id(self):
return self.random.getrandbits(MAX_ID_BITS)
def gen_id(self):
span_id = db.TraceDb.get_db().create_span_id()
return span_id
def form_span_id(self,span_id):
return db.TraceDb.get_db().form_span_id(span_id)
def whole_system_root_span(self):
REG_WHOLE_SYSTEM_SCRIPT = """
key = db.work_on_key()
(s, v) = db.get(key)
if s.not_found():
s = db.put(key, span_id)
if s.ok():
return '{"status":"ok", "ret": "%s" }' % span_id
else:
return '{"status":"error", "ret": "put whole system root span id %s failed" % str(span_id) }'
else:
if s.ok():
return '{"status":"ok", "ret": "%s" }' % str(v)
else:
return '{"status":"error", "ret": "get whole system root span id %s failed, because %s" % (str(span_id), str(s)) }'"""
root_span_info = self.taskdb().get(define.WHILE_SYSTEM_ROOT_SPAN_PATH)
if root_span_info['reason'] == 'ok':
try:
span_id = root_span_info['ret']
assert name_to_standard(span_id) == span_id
span_ctx = self.extract(Format.TEXT_MAP, {"span_context": {SPAN_ID: span_id}})
return Span(self, span_ctx)
except Exception, ex:
pass
else:
s = Span(self, None)
root_span_info = self.taskdb().exec_script(define.WHILE_SYSTEM_ROOT_SPAN_PATH,
"span_id='" + s.context.span_id + "'\n" + REG_WHOLE_SYSTEM_SCRIPT)
if root_span_info['reason'] == 'ok':
try:
span_id_info = json.loads(root_span_info['ret'])
if span_id_info['status'] == 'ok':
span_id = span_id_info['ret']
assert name_to_standard(span_id) == span_id
span_ctx = self.extract(Format.TEXT_MAP, {"span_context": {SPAN_ID: span_id}})
root_span = Span(self, span_ctx)
root_span.set_operation_name("gtx_cloud_root")
return root_span
else:
raise SpanContextCorruptedException()
except Exception, ex:
# print exception_report(str(ex))
raise SpanContextCorruptedException()
else:
raise SpanContextCorruptedException()
def get_span(self,span_id,contain_child=False):
return db.TraceDb.get_db().get_span_info(span_id,contain_child)
def get_logs(self,span_id,timestamp=None,log_ring=None):
return db.TraceDb.get_db().get_span_logs(span_id,timestamp,log_ring)
def get_context(self,span_id):
return db.TraceDb.get_db().get_span_context(span_id)
def set_span_name(self,span_id,name):
db.TraceDb.get_db().set_span_name(span_id,name)
class TracerMetrics(object):
"""Tracer specific metrics."""
def __init__(self, metrics_factory):
self.traces_started_sampled = \
metrics_factory.create_counter(name='algo.traces-started', tags={'sampled': 'true'})
self.traces_started_not_sampled = \
metrics_factory.create_counter(name='algo.traces-started', tags={'sampled': 'false'})
self.traces_joined_sampled = \
metrics_factory.create_counter(name='algo.traces-joined', tags={'sampled': 'true'})
self.traces_joined_not_sampled = \
metrics_factory.create_counter(name='algo.traces-joined', tags={'sampled': 'false'})
self.spans_sampled = \
metrics_factory.create_counter(name='algo.spans', tags={'sampled': 'true'})
self.spans_not_sampled = \
metrics_factory.create_counter(name='algo.spans', tags={'sampled': 'false'})
|
AppTrack
|
/AppTrack-1.0.tar.gz/AppTrack-1.0/apptrack/tracer.py
|
tracer.py
|
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import object
import six
class MetricsFactory(object):
"""Generates new metrics."""
def _noop(self, *args):
pass
def create_counter(self, name, tags=None):
return self._noop
def create_timer(self, name, tags=None):
return self._noop
def create_gauge(self, name, tags=None):
return self._noop
class LegacyMetricsFactory(MetricsFactory):
"""A MetricsFactory adapter for legacy Metrics class."""
def __init__(self, metrics):
self._metrics = metrics
def create_counter(self, name, tags=None):
key = self._get_key(name, tags)
def increment(value):
return self._metrics.count(key, value)
return increment
def create_timer(self, name, tags=None):
key = self._get_key(name, tags)
def record(value):
# Convert microseconds to milliseconds for legacy
return self._metrics.timing(key, value / 1000.0)
return record
def create_gauge(self, name, tags=None):
key = self._get_key(name, tags)
def update(value):
return self._metrics.gauge(key, value)
return update
def _get_key(self, name, tags=None):
if not tags:
return name
key = name
for k in sorted(six.iterkeys(tags)):
key = key + '.' + str(k) + '_' + str(tags[k])
return key
class Metrics(object):
"""
Provides an abstraction of metrics reporting framework.
This Class has been deprecated, please use MetricsFactory
instead.
"""
def __init__(self, count=None, gauge=None, timing=None):
self._count = count
self._gauge = gauge
self._timing = timing
if not callable(self._count):
self._count = None
if not callable(self._gauge):
self._gauge = None
if not callable(self._timing):
self._timing = None
def count(self, key, value):
if self._count:
self._count(key, value)
def timing(self, key, value):
if self._timing:
self._timing(key, value)
def gauge(self, key, value):
if self._gauge:
self._gauge(key, value)
|
AppTrack
|
/AppTrack-1.0.tar.gz/AppTrack-1.0/apptrack/metrics.py
|
metrics.py
|
from __future__ import absolute_import
from builtins import object
import logging
import threading
import socket
from concurrent.futures import Future
from .constants import DEFAULT_FLUSH_INTERVAL
from . import thrift
from .metrics import Metrics, LegacyMetricsFactory
from .utils import ErrorReporter
import json
import six
import slugify
default_logger = logging.getLogger(__name__)
class NullReporter(object):
"""Ignores all spans."""
def report_span(self, span):
pass
def set_process(self, service_name, tags, max_length):
pass
def close(self):
fut = Future()
fut.set_result(True)
return fut
class InMemoryReporter(NullReporter):
"""Stores spans in memory and returns them via get_spans()."""
def __init__(self):
super(InMemoryReporter, self).__init__()
self.spans = []
self.lock = threading.Lock()
def report_span(self, span):
with self.lock:
self.spans.append(span)
def get_spans(self):
with self.lock:
return self.spans[:]
class LoggingReporter(NullReporter):
"""Logs all spans."""
def __init__(self, logger=None):
self.logger = logger if logger else default_logger
def report_span(self, span):
self.logger.info('Reporting span %s', span)
class Reporter(NullReporter):
"""Receives completed spans from Tracer and submits them out of process."""
def __init__(self, channel, error_reporter=None, metrics=None, metrics_factory=None, **kwargs):
from threading import Lock
self.metrics_factory = metrics_factory or LegacyMetricsFactory(metrics or Metrics())
self.metrics = ReporterMetrics(self.metrics_factory)
self.error_reporter = error_reporter or ErrorReporter(Metrics())
self.logger = kwargs.get('logger', default_logger)
# self.agent = Agent.Client(self._channel, self)
self.stopped = False
self.stop_lock = Lock()
self._process_lock = Lock()
self._process = None
self.channel = channel
def set_process(self, service_name, tags, max_length):
with self._process_lock:
self._process = thrift.make_process(
service_name=service_name, tags=tags, max_length=max_length,
)
def report_span(self, span):
self._send(span)
def _send(self, span):
if not span:
return
span_id=span.span_id
self.channel.set_span_name(span_id,span.operation_name)
self.channel.start_span(span_id,parent_id=span.parent_id,tags=span.tags,context=six.iteritems(span.context))
def put_log(self,span_id,level,msg,rings=[],**kwargs):
self.channel.put_log(span_id,level,msg,rings,**kwargs)
def finish_span(self,span):
self.channel.finish_span(span.span_id)
def close(self):
with self.stop_lock:
self.stopped = True
def put_tag(self,span,key,value):
self.channel.put_tag(span.span_id,key,value)
def update_context(self,context):
self.channel.update_context(six.iteritems(context))
class ReporterMetrics(object):
def __init__(self, metrics_factory):
self.reporter_success = \
metrics_factory.create_counter(name='algo.spans', tags={'reported': 'true'})
self.reporter_failure = \
metrics_factory.create_counter(name='algo.spans', tags={'reported': 'false'})
self.reporter_dropped = \
metrics_factory.create_counter(name='algo.spans', tags={'dropped': 'true'})
self.reporter_socket = \
metrics_factory.create_counter(name='algo.spans', tags={'socket_error': 'true'})
class CompositeReporter(NullReporter):
"""Delegates reporting to one or more underlying reporters."""
def __init__(self, *reporters):
self.reporters = reporters
def set_process(self, service_name, tags, max_length):
for reporter in self.reporters:
reporter.set_process(service_name, tags, max_length)
def report_span(self, span):
for reporter in self.reporters:
reporter.report_span(span)
def close(self):
from threading import Lock
lock = Lock()
count = [0]
future = Future()
def on_close(_):
with lock:
count[0] += 1
if count[0] == len(self.reporters):
future.set_result(True)
for reporter in self.reporters:
f = reporter.close()
f.add_done_callback(on_close)
return future
|
AppTrack
|
/AppTrack-1.0.tar.gz/AppTrack-1.0/apptrack/reporter.py
|
reporter.py
|
from __future__ import absolute_import
from builtins import object
import logging
import os
import threading
import opentracing
from opentracing.propagation import Format
from . import db
from . import Tracer
from .reporter import (
Reporter,
CompositeReporter,
LoggingReporter,
)
from .sampler import (
ConstSampler,
ProbabilisticSampler,
RateLimitingSampler,
)
from .constants import (
DEFAULT_SAMPLING_INTERVAL,
DEFAULT_FLUSH_INTERVAL,
SAMPLER_TYPE_CONST,
SAMPLER_TYPE_PROBABILISTIC,
SAMPLER_TYPE_RATE_LIMITING,
TRACE_ID_HEADER,
BAGGAGE_HEADER_PREFIX,
DEBUG_ID_HEADER_KEY,
MAX_TAG_VALUE_LENGTH,
DATABASE_HOST_KEY,
DATABASE_PORT_KEY,
)
from .utils import get_boolean
from .metrics import LegacyMetricsFactory, MetricsFactory, Metrics
DEFAULT_REPORTING_HOST = 'localhost'
DEFAULT_REPORTING_PORT = 1206
DEFAULT_SAMPLING_PORT = 5778
LOCAL_AGENT_DEFAULT_ENABLED = True
DEFAULT_DATABASE_HOST = 'localhost'
DEFAULT_DATABASE_PORT = 1206
logger = logging.getLogger(__name__)
logging.basicConfig()
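# Note: logging.basicConfig() at import time configures the root logger for
# any application that imports this module; libraries more commonly leave
# root-logger configuration to the embedding application.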
class Config(object):
_initialized = False
_initialized_lock = threading.Lock()
def __init__(self, config, service_name=None, metrics=None, metrics_factory=None):
self.config = config
if get_boolean(self.config.get('metrics', True), True):
self._metrics_factory = metrics_factory or LegacyMetricsFactory(metrics or Metrics())
else:
self._metrics_factory = MetricsFactory()
self._service_name = config.get('service_name', service_name)
if self._service_name is None:
raise ValueError('service_name required in the config or param')
@property
def service_name(self):
return self._service_name
def local_agent_group(self):
return self.config.get('local_agent', None)
@property
def local_agent_reporting_port(self):
# noinspection PyBroadException
try:
return int(self.local_agent_group()['reporting_port'])
        except Exception:
return DEFAULT_REPORTING_PORT
@property
def local_agent_reporting_host(self):
# noinspection PyBroadException
try:
return self.local_agent_group()['reporting_host']
        except Exception:
return DEFAULT_REPORTING_HOST
@property
def logging(self):
return get_boolean(self.config.get('logging', False), False)
@property
def trace_id_header(self):
"""
:return: Returns the name of the HTTP header used to encode trace ID
"""
return self.config.get('trace_id_header', TRACE_ID_HEADER)
@property
def baggage_header_prefix(self):
"""
:return: Returns the prefix for HTTP headers used to record baggage
items
"""
return self.config.get('baggage_header_prefix', BAGGAGE_HEADER_PREFIX)
@property
def debug_id_header(self):
"""
:return: Returns the name of HTTP header or a TextMap carrier key
which, if found in the carrier, forces the trace to be sampled as
"debug" trace. The value of the header is recorded as the tag on the
root span, so that the trace can be found in the UI using this value
as a correlation ID.
"""
return self.config.get('debug_id_header', DEBUG_ID_HEADER_KEY)
@property
def enabled(self):
return get_boolean(self.config.get('enabled', True), True)
@property
def max_tag_value_length(self):
"""
:return: Returns max allowed tag value length. Longer values will
be truncated.
"""
return self.config.get('max_tag_value_length', MAX_TAG_VALUE_LENGTH)
@property
def sampler(self):
sampler_config = self.config.get('sampler', {})
sampler_type = sampler_config.get('type', None)
sampler_param = sampler_config.get('param', None)
if not sampler_type:
return None
elif sampler_type == SAMPLER_TYPE_CONST:
return ConstSampler(decision=get_boolean(sampler_param, False))
elif sampler_type == SAMPLER_TYPE_PROBABILISTIC:
return ProbabilisticSampler(rate=float(sampler_param))
elif sampler_type in [SAMPLER_TYPE_RATE_LIMITING, 'rate_limiting']:
return RateLimitingSampler(
max_traces_per_second=float(sampler_param))
        # Any other sampler type is a configuration error.
        raise ValueError('Unknown sampler type %s' % sampler_type)
@property
def database_addr(self):
        return self.config.get(DATABASE_HOST_KEY, DEFAULT_DATABASE_HOST)
    @property
    def database_port(self):
        return self.config.get(DATABASE_PORT_KEY, DEFAULT_DATABASE_PORT)
@property
def max_operations(self):
return self.config.get('max_operations', None)
@property
def tags(self):
"""
        :return: Returns tags from config and the `APPTRACK_TAGS` environment variable
to use as process-wide tracer tags
"""
tags = self.config.get('tags', {})
env_tags = os.environ.get('APPTRACK_TAGS', '')
if env_tags:
for kv in env_tags.split(','):
                key, value = kv.split('=', 1)  # split once so values may contain '='
tags[key.strip()] = value.strip()
return tags
@staticmethod
def initialized():
with Config._initialized_lock:
return Config._initialized
    def initialize_tracer(self, io_loop=None, default_backend=None, **kwargs):
        """
        Initialize the AppTrack tracer based on this `Config` and register it
        as the `opentracing` global tracer.
        Only the first call to this method has any effect.
        """
if opentracing.is_global_tracer_registered():
return opentracing.global_tracer()
with Config._initialized_lock:
if Config._initialized:
                logger.warning('apptrack tracer already initialized, skipping')
return
Config._initialized = True
channel = self._create_agent_channel()
            channel.set_default_backend(default_backend, **kwargs)
sampler = self.sampler
logger.info('Using sampler %s', sampler)
reporter = Reporter(channel=channel, metrics_factory=self._metrics_factory)
if self.logging:
reporter = CompositeReporter(reporter, LoggingReporter(logger))
tracer = self.create_tracer(
reporter=reporter,
sampler=sampler,
)
self._initialize_global_tracer(tracer=tracer)
return tracer
def create_tracer(self, reporter, sampler):
return Tracer(
service_name=self.service_name,
reporter=reporter,
sampler=sampler,
trace_id_header=self.trace_id_header,
baggage_header_prefix=self.baggage_header_prefix,
debug_id_header=self.debug_id_header,
tags=self.tags,
max_tag_value_length=self.max_tag_value_length,
)
def _initialize_global_tracer(self, tracer):
opentracing.set_global_tracer(tracer)
logger.info('opentracing.tracer initialized to %s[app_name=%s]',
tracer, self.service_name)
def _create_agent_channel(self, io_loop=None):
return db.TraceDb.get_db()
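# Illustrative usage sketch (assumption, not part of the original file):
# initializing the global tracer from a plain dict config. The sampler type
# string is assumed to be 'probabilistic' (see SAMPLER_TYPE_PROBABILISTIC in
# apptrack.constants); 'demo-service' and the tags are placeholders.
#
#   config = Config(
#       config={
#           'sampler': {'type': 'probabilistic', 'param': 0.1},
#           'logging': True,
#           'tags': {'deployment': 'staging'},
#       },
#       service_name='demo-service',
#   )
#   tracer = config.initialize_tracer()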
|
AppTrack
|
/AppTrack-1.0.tar.gz/AppTrack-1.0/apptrack/config.py
|
config.py
|