import datetime
import io
from typing import Any, Dict
from botocore.response import StreamingBody
get_databases_response = {
"DatabaseList": [
{
"Name": "flights-database",
"CreateTime": datetime.datetime(2021, 6, 9, 14, 14, 19),
"CreateTableDefaultPermissions": [
{
"Principal": {
"DataLakePrincipalIdentifier": "IAM_ALLOWED_PRINCIPALS"
},
"Permissions": ["ALL"],
}
],
"CatalogId": "123412341234",
},
{
"Name": "test-database",
"CreateTime": datetime.datetime(2021, 6, 1, 14, 55, 2),
"CreateTableDefaultPermissions": [
{
"Principal": {
"DataLakePrincipalIdentifier": "IAM_ALLOWED_PRINCIPALS"
},
"Permissions": ["ALL"],
}
],
"CatalogId": "123412341234",
},
]
}
get_tables_response_1 = {
"TableList": [
{
"Name": "avro",
"DatabaseName": "flights-database",
"Owner": "owner",
"CreateTime": datetime.datetime(2021, 6, 9, 14, 17, 35),
"UpdateTime": datetime.datetime(2021, 6, 9, 14, 17, 35),
"LastAccessTime": datetime.datetime(2021, 6, 9, 14, 17, 35),
"Retention": 0,
"StorageDescriptor": {
"Columns": [
{"Name": "yr", "Type": "int"},
{"Name": "flightdate", "Type": "string"},
{"Name": "uniquecarrier", "Type": "string"},
{"Name": "airlineid", "Type": "int"},
{"Name": "carrier", "Type": "string"},
{"Name": "flightnum", "Type": "string"},
{"Name": "origin", "Type": "string"},
],
"Location": "s3://crawler-public-us-west-2/flight/avro/",
"InputFormat": "org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat",
"Compressed": False,
"NumberOfBuckets": -1,
"SerdeInfo": {
"SerializationLibrary": "org.apache.hadoop.hive.serde2.avro.AvroSerDe",
"Parameters": {
"avro.schema.literal": '{"type":"record","name":"flights_avro_subset","namespace":"default","fields":[{"name":"yr","type":["null","int"],"default":null},{"name":"flightdate","type":["null","string"],"default":null},{"name":"uniquecarrier","type":["null","string"],"default":null},{"name":"airlineid","type":["null","int"],"default":null},{"name":"carrier","type":["null","string"],"default":null},{"name":"flightnum","type":["null","string"],"default":null},{"name":"origin","type":["null","string"],"default":null},{"name":"dest","type":["null","string"],"default":null},{"name":"depdelay","type":["null","int"],"default":null},{"name":"carrierdelay","type":["null","int"],"default":null},{"name":"weatherdelay","type":["null","int"],"default":null}]}',
"serialization.format": "1",
},
},
"BucketColumns": [],
"SortColumns": [],
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"CrawlerSchemaSerializerVersion": "1.0",
"UPDATED_BY_CRAWLER": "flights-crawler",
"averageRecordSize": "55",
"avro.schema.literal": '{"type":"record","name":"flights_avro_subset","namespace":"default","fields":[{"name":"yr","type":["null","int"],"default":null},{"name":"flightdate","type":["null","string"],"default":null},{"name":"uniquecarrier","type":["null","string"],"default":null},{"name":"airlineid","type":["null","int"],"default":null},{"name":"carrier","type":["null","string"],"default":null},{"name":"flightnum","type":["null","string"],"default":null},{"name":"origin","type":["null","string"],"default":null},{"name":"dest","type":["null","string"],"default":null},{"name":"depdelay","type":["null","int"],"default":null},{"name":"carrierdelay","type":["null","int"],"default":null},{"name":"weatherdelay","type":["null","int"],"default":null}]}',
"classification": "avro",
"compressionType": "none",
"objectCount": "30",
"recordCount": "169222196",
"sizeKey": "9503351413",
"typeOfData": "file",
},
"StoredAsSubDirectories": False,
},
"PartitionKeys": [{"Name": "year", "Type": "string"}],
"TableType": "EXTERNAL_TABLE",
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"CrawlerSchemaSerializerVersion": "1.0",
"UPDATED_BY_CRAWLER": "flights-crawler",
"averageRecordSize": "55",
"avro.schema.literal": '{"type":"record","name":"flights_avro_subset","namespace":"default","fields":[{"name":"yr","type":["null","int"],"default":null},{"name":"flightdate","type":["null","string"],"default":null},{"name":"uniquecarrier","type":["null","string"],"default":null},{"name":"airlineid","type":["null","int"],"default":null},{"name":"carrier","type":["null","string"],"default":null},{"name":"flightnum","type":["null","string"],"default":null},{"name":"origin","type":["null","string"],"default":null},{"name":"dest","type":["null","string"],"default":null},{"name":"depdelay","type":["null","int"],"default":null},{"name":"carrierdelay","type":["null","int"],"default":null},{"name":"weatherdelay","type":["null","int"],"default":null}]}',
"classification": "avro",
"compressionType": "none",
"objectCount": "30",
"recordCount": "169222196",
"sizeKey": "9503351413",
"typeOfData": "file",
},
"CreatedBy": "arn:aws:sts::123412341234:assumed-role/AWSGlueServiceRole-flights-crawler/AWS-Crawler",
"IsRegisteredWithLakeFormation": False,
"CatalogId": "123412341234",
}
]
}
get_tables_response_2 = {
"TableList": [
{
"Name": "test_jsons_markers",
"DatabaseName": "test-database",
"Owner": "owner",
"CreateTime": datetime.datetime(2021, 6, 2, 12, 6, 59),
"UpdateTime": datetime.datetime(2021, 6, 2, 12, 6, 59),
"LastAccessTime": datetime.datetime(2021, 6, 2, 12, 6, 59),
"Retention": 0,
"StorageDescriptor": {
"Columns": [
{
"Name": "markers",
"Type": "array<struct<name:string,position:array<double>,location:array<double>>>",
}
],
"Location": "s3://test-glue-jsons/markers/",
"InputFormat": "org.apache.hadoop.mapred.TextInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
"Compressed": False,
"NumberOfBuckets": -1,
"SerdeInfo": {
"SerializationLibrary": "org.openx.data.jsonserde.JsonSerDe",
"Parameters": {"paths": "markers"},
},
"BucketColumns": [],
"SortColumns": [],
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"CrawlerSchemaSerializerVersion": "1.0",
"UPDATED_BY_CRAWLER": "test-jsons",
"averageRecordSize": "273",
"classification": "json",
"compressionType": "none",
"objectCount": "1",
"recordCount": "1",
"sizeKey": "273",
"typeOfData": "file",
},
"StoredAsSubDirectories": False,
},
"PartitionKeys": [],
"TableType": "EXTERNAL_TABLE",
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"CrawlerSchemaSerializerVersion": "1.0",
"UPDATED_BY_CRAWLER": "test-jsons",
"averageRecordSize": "273",
"classification": "json",
"compressionType": "none",
"objectCount": "1",
"recordCount": "1",
"sizeKey": "273",
"typeOfData": "file",
},
"CreatedBy": "arn:aws:sts::795586375822:assumed-role/AWSGlueServiceRole-test-crawler/AWS-Crawler",
"IsRegisteredWithLakeFormation": False,
"CatalogId": "795586375822",
},
{
"Name": "test_parquet",
"DatabaseName": "test-database",
"Owner": "owner",
"CreateTime": datetime.datetime(2021, 6, 1, 16, 14, 53),
"UpdateTime": datetime.datetime(2021, 6, 1, 16, 14, 53),
"LastAccessTime": datetime.datetime(2021, 6, 1, 16, 14, 53),
"Retention": 0,
"StorageDescriptor": {
"Columns": [
{"Name": "yr", "Type": "int"},
{"Name": "quarter", "Type": "int"},
{"Name": "month", "Type": "int"},
{"Name": "dayofmonth", "Type": "int"},
],
"Location": "s3://crawler-public-us-west-2/flight/parquet/",
"InputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"OutputFormat": "org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"Compressed": False,
"NumberOfBuckets": -1,
"SerdeInfo": {
"SerializationLibrary": "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
"Parameters": {"serialization.format": "1"},
},
"BucketColumns": [],
"SortColumns": [],
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"CrawlerSchemaSerializerVersion": "1.0",
"UPDATED_BY_CRAWLER": "test",
"averageRecordSize": "19",
"classification": "parquet",
"compressionType": "none",
"objectCount": "60",
"recordCount": "167497743",
"sizeKey": "4463574900",
"typeOfData": "file",
},
"StoredAsSubDirectories": False,
},
"PartitionKeys": [{"Name": "year", "Type": "string"}],
"TableType": "EXTERNAL_TABLE",
"Parameters": {
"CrawlerSchemaDeserializerVersion": "1.0",
"CrawlerSchemaSerializerVersion": "1.0",
"UPDATED_BY_CRAWLER": "test",
"averageRecordSize": "19",
"classification": "parquet",
"compressionType": "none",
"objectCount": "60",
"recordCount": "167497743",
"sizeKey": "4463574900",
"typeOfData": "file",
},
"CreatedBy": "arn:aws:sts::795586375822:assumed-role/AWSGlueServiceRole-test-crawler/AWS-Crawler",
"IsRegisteredWithLakeFormation": False,
"CatalogId": "795586375822",
},
]
}
get_jobs_response = {
"Jobs": [
{
"Name": "test-job-1",
"Description": "The first test job",
"Role": "arn:aws:iam::123412341234:role/service-role/AWSGlueServiceRole-glue-crawler",
"CreatedOn": datetime.datetime(2021, 6, 10, 16, 51, 25, 690000),
"LastModifiedOn": datetime.datetime(2021, 6, 10, 16, 55, 35, 307000),
"ExecutionProperty": {"MaxConcurrentRuns": 1},
"Command": {
"Name": "glueetl",
"ScriptLocation": "s3://aws-glue-assets-123412341234-us-west-2/scripts/job-1.py",
"PythonVersion": "3",
},
"DefaultArguments": {
"--TempDir": "s3://aws-glue-assets-123412341234-us-west-2/temporary/",
"--class": "GlueApp",
"--enable-continuous-cloudwatch-log": "true",
"--enable-glue-datacatalog": "true",
"--enable-metrics": "true",
"--enable-spark-ui": "true",
"--encryption-type": "sse-s3",
"--job-bookmark-option": "job-bookmark-enable",
"--job-language": "python",
"--spark-event-logs-path": "s3://aws-glue-assets-123412341234-us-west-2/sparkHistoryLogs/",
},
"MaxRetries": 3,
"AllocatedCapacity": 10,
"Timeout": 2880,
"MaxCapacity": 10.0,
"WorkerType": "G.1X",
"NumberOfWorkers": 10,
"GlueVersion": "2.0",
},
{
"Name": "test-job-2",
"Description": "The second test job",
"Role": "arn:aws:iam::123412341234:role/service-role/AWSGlueServiceRole-glue-crawler",
"CreatedOn": datetime.datetime(2021, 6, 10, 16, 58, 32, 469000),
"LastModifiedOn": datetime.datetime(2021, 6, 10, 16, 58, 32, 469000),
"ExecutionProperty": {"MaxConcurrentRuns": 1},
"Command": {
"Name": "glueetl",
"ScriptLocation": "s3://aws-glue-assets-123412341234-us-west-2/scripts/job-2.py",
"PythonVersion": "3",
},
"DefaultArguments": {
"--TempDir": "s3://aws-glue-assets-123412341234-us-west-2/temporary/",
"--class": "GlueApp",
"--enable-continuous-cloudwatch-log": "true",
"--enable-glue-datacatalog": "true",
"--enable-metrics": "true",
"--enable-spark-ui": "true",
"--encryption-type": "sse-s3",
"--job-bookmark-option": "job-bookmark-enable",
"--job-language": "python",
"--spark-event-logs-path": "s3://aws-glue-assets-123412341234-us-west-2/sparkHistoryLogs/",
},
"MaxRetries": 3,
"AllocatedCapacity": 10,
"Timeout": 2880,
"MaxCapacity": 10.0,
"WorkerType": "G.1X",
"NumberOfWorkers": 10,
"GlueVersion": "2.0",
},
]
}
# for job 1
get_dataflow_graph_response_1 = {
"DagNodes": [
{
"Id": "Transform0_job1",
"NodeType": "Filter",
"Args": [
{"Name": "f", "Value": "lambda row : ()", "Param": False},
{
"Name": "transformation_ctx",
"Value": '"Transform0"',
"Param": False,
},
],
"LineNumber": 32,
},
{
"Id": "Transform1_job1",
"NodeType": "ApplyMapping",
"Args": [
{
"Name": "mappings",
"Value": '[("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")]',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform1"',
"Param": False,
},
],
"LineNumber": 37,
},
{
"Id": "Transform2_job1",
"NodeType": "ApplyMapping",
"Args": [
{
"Name": "mappings",
"Value": '[("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")]',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform2"',
"Param": False,
},
],
"LineNumber": 22,
},
{
"Id": "Transform3_job1",
"NodeType": "Join",
"Args": [
{
"Name": "keys2",
"Value": '["(right) flightdate"]',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform3"',
"Param": False,
},
{"Name": "keys1", "Value": '["yr"]', "Param": False},
],
"LineNumber": 47,
},
{
"Id": "DataSource0_job1",
"NodeType": "DataSource",
"Args": [
{
"Name": "database",
"Value": '"flights-database"',
"Param": False,
},
{"Name": "table_name", "Value": '"avro"', "Param": False},
{
"Name": "transformation_ctx",
"Value": '"DataSource0"',
"Param": False,
},
],
"LineNumber": 17,
},
{
"Id": "DataSink0_job1",
"NodeType": "DataSink",
"Args": [
{
"Name": "database",
"Value": '"test-database"',
"Param": False,
},
{
"Name": "table_name",
"Value": '"test_jsons_markers"',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"DataSink0"',
"Param": False,
},
],
"LineNumber": 57,
},
{
"Id": "Transform4_job1",
"NodeType": "ApplyMapping",
"Args": [
{
"Name": "mappings",
"Value": '[("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")]',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform4"',
"Param": False,
},
],
"LineNumber": 27,
},
{
"Id": "Transform5_job1",
"NodeType": "ApplyMapping",
"Args": [
{
"Name": "mappings",
"Value": '[("yr", "int", "(right) yr", "int"), ("flightdate", "string", "(right) flightdate", "string"), ("uniquecarrier", "string", "(right) uniquecarrier", "string"), ("airlineid", "int", "(right) airlineid", "int"), ("carrier", "string", "(right) carrier", "string"), ("flightnum", "string", "(right) flightnum", "string"), ("origin", "string", "(right) origin", "string"), ("dest", "string", "(right) dest", "string"), ("depdelay", "int", "(right) depdelay", "int"), ("carrierdelay", "int", "(right) carrierdelay", "int"), ("weatherdelay", "int", "(right) weatherdelay", "int"), ("year", "string", "(right) year", "string")]',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform5"',
"Param": False,
},
],
"LineNumber": 42,
},
{
"Id": "DataSink1_job1",
"NodeType": "DataSink",
"Args": [
{"Name": "connection_type", "Value": '"s3"', "Param": False},
{"Name": "format", "Value": '"json"', "Param": False},
{
"Name": "connection_options",
"Value": '{"path": "s3://test-glue-jsons/", "partitionKeys": []}',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"DataSink1"',
"Param": False,
},
],
"LineNumber": 52,
},
],
"DagEdges": [
{
"Source": "Transform2_job1",
"Target": "Transform0_job1",
"TargetParameter": "frame",
},
{
"Source": "Transform0_job1",
"Target": "Transform1_job1",
"TargetParameter": "frame",
},
{
"Source": "DataSource0_job1",
"Target": "Transform2_job1",
"TargetParameter": "frame",
},
{
"Source": "Transform4_job1",
"Target": "Transform3_job1",
"TargetParameter": "frame1",
},
],
}
# for job 2
get_dataflow_graph_response_2 = {
"DagNodes": [
{
"Id": "Transform0_job2",
"NodeType": "SplitFields",
"Args": [
{
"Name": "paths",
"Value": '["yr", "quarter", "month", "dayofmonth", "dayofweek", "flightdate", "uniquecarrier"]',
"Param": False,
},
{
"Name": "name2",
"Value": '"Transform0Output1"',
"Param": False,
},
{
"Name": "name1",
"Value": '"Transform0Output0"',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform0"',
"Param": False,
},
],
"LineNumber": 42,
},
{
"Id": "Transform1_job2",
"NodeType": "ApplyMapping",
"Args": [
{
"Name": "mappings",
"Value": '[("yr", "int", "yr", "int"), ("quarter", "int", "quarter", "int"), ("month", "int", "month", "int"), ("dayofmonth", "int", "dayofmonth", "int"), ("dayofweek", "int", "dayofweek", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string")]',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform1"',
"Param": False,
},
],
"LineNumber": 22,
},
{
"Id": "Transform2_job2",
"NodeType": "FillMissingValues",
"Args": [
{
"Name": "missing_values_column",
"Value": '"dayofmonth"',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"Transform2"',
"Param": False,
},
],
"LineNumber": 27,
},
{
"Id": "Transform3_job2",
"NodeType": "SelectFields",
"Args": [
{"Name": "paths", "Value": "[]", "Param": False},
{
"Name": "transformation_ctx",
"Value": '"Transform3"',
"Param": False,
},
],
"LineNumber": 32,
},
{
"Id": "DataSource0_job2",
"NodeType": "DataSource",
"Args": [
{
"Name": "database",
"Value": '"test-database"',
"Param": False,
},
{
"Name": "table_name",
"Value": '"test_parquet"',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"DataSource0"',
"Param": False,
},
],
"LineNumber": 17,
},
{
"Id": "DataSink0_job2",
"NodeType": "DataSink",
"Args": [
{"Name": "connection_type", "Value": '"s3"', "Param": False},
{"Name": "format", "Value": '"json"', "Param": False},
{
"Name": "connection_options",
"Value": '{"path": "s3://test-glue-jsons/", "partitionKeys": []}',
"Param": False,
},
{
"Name": "transformation_ctx",
"Value": '"DataSink0"',
"Param": False,
},
],
"LineNumber": 37,
},
],
"DagEdges": [
{
"Source": "Transform1_job2",
"Target": "Transform0_job2",
"TargetParameter": "frame",
},
{
"Source": "DataSource0_job2",
"Target": "Transform1_job2",
"TargetParameter": "frame",
},
{
"Source": "Transform1_job2",
"Target": "Transform2_job2",
"TargetParameter": "frame",
},
{
"Source": "Transform2_job2",
"Target": "Transform3_job2",
"TargetParameter": "frame",
},
{
"Source": "Transform3_job2",
"Target": "DataSink0_job2",
"TargetParameter": "frame",
},
],
}
get_object_body_1 = """
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import re
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "flights-database", table_name = "avro", transformation_ctx = "DataSource0"]
## @return: DataSource0
## @inputs: []
DataSource0 = glueContext.create_dynamic_frame.from_catalog(database = "flights-database", table_name = "avro", transformation_ctx = "DataSource0")
## @type: ApplyMapping
## @args: [mappings = [("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")], transformation_ctx = "Transform2"]
## @return: Transform2
## @inputs: [frame = DataSource0]
Transform2 = ApplyMapping.apply(frame = DataSource0, mappings = [("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")], transformation_ctx = "Transform2")
## @type: ApplyMapping
## @args: [mappings = [("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")], transformation_ctx = "Transform4"]
## @return: Transform4
## @inputs: [frame = Transform2]
Transform4 = ApplyMapping.apply(frame = Transform2, mappings = [("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")], transformation_ctx = "Transform4")
## @type: Filter
## @args: [f = lambda row : (), transformation_ctx = "Transform0"]
## @return: Transform0
## @inputs: [frame = Transform2]
Transform0 = Filter.apply(frame = Transform2, f = lambda row : (), transformation_ctx = "Transform0")
## @type: ApplyMapping
## @args: [mappings = [("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")], transformation_ctx = "Transform1"]
## @return: Transform1
## @inputs: [frame = Transform0]
Transform1 = ApplyMapping.apply(frame = Transform0, mappings = [("yr", "int", "yr", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string"), ("flightnum", "string", "flightnum", "string"), ("origin", "string", "origin", "string"), ("dest", "string", "dest", "string"), ("depdelay", "int", "depdelay", "int"), ("carrierdelay", "int", "carrierdelay", "int"), ("weatherdelay", "int", "weatherdelay", "int"), ("year", "string", "year", "string")], transformation_ctx = "Transform1")
## @type: ApplyMapping
## @args: [mappings = [("yr", "int", "(right) yr", "int"), ("flightdate", "string", "(right) flightdate", "string"), ("uniquecarrier", "string", "(right) uniquecarrier", "string"), ("airlineid", "int", "(right) airlineid", "int"), ("carrier", "string", "(right) carrier", "string"), ("flightnum", "string", "(right) flightnum", "string"), ("origin", "string", "(right) origin", "string"), ("dest", "string", "(right) dest", "string"), ("depdelay", "int", "(right) depdelay", "int"), ("carrierdelay", "int", "(right) carrierdelay", "int"), ("weatherdelay", "int", "(right) weatherdelay", "int"), ("year", "string", "(right) year", "string")], transformation_ctx = "Transform5"]
## @return: Transform5
## @inputs: [frame = Transform1]
Transform5 = ApplyMapping.apply(frame = Transform1, mappings = [("yr", "int", "(right) yr", "int"), ("flightdate", "string", "(right) flightdate", "string"), ("uniquecarrier", "string", "(right) uniquecarrier", "string"), ("airlineid", "int", "(right) airlineid", "int"), ("carrier", "string", "(right) carrier", "string"), ("flightnum", "string", "(right) flightnum", "string"), ("origin", "string", "(right) origin", "string"), ("dest", "string", "(right) dest", "string"), ("depdelay", "int", "(right) depdelay", "int"), ("carrierdelay", "int", "(right) carrierdelay", "int"), ("weatherdelay", "int", "(right) weatherdelay", "int"), ("year", "string", "(right) year", "string")], transformation_ctx = "Transform5")
## @type: Join
## @args: [keys2 = ["(right) flightdate"], keys1 = ["yr"], transformation_ctx = "Transform3"]
## @return: Transform3
## @inputs: [frame1 = Transform4, frame2 = Transform5]
Transform3 = Join.apply(frame1 = Transform4, frame2 = Transform5, keys2 = ["(right) flightdate"], keys1 = ["yr"], transformation_ctx = "Transform3")
## @type: DataSink
## @args: [connection_type = "s3", format = "json", connection_options = {"path": "s3://test-glue-jsons/", "partitionKeys": []}, transformation_ctx = "DataSink1"]
## @return: DataSink1
## @inputs: [frame = Transform3]
DataSink1 = glueContext.write_dynamic_frame.from_options(frame = Transform3, connection_type = "s3", format = "json", connection_options = {"path": "s3://test-glue-jsons/", "partitionKeys": []}, transformation_ctx = "DataSink1")
## @type: DataSink
## @args: [database = "test-database", table_name = "test_jsons_markers", transformation_ctx = "DataSink0"]
## @return: DataSink0
## @inputs: [frame = Transform3]
DataSink0 = glueContext.write_dynamic_frame.from_catalog(frame = Transform3, database = "test-database", table_name = "test_jsons_markers", transformation_ctx = "DataSink0")
job.commit()
"""
get_object_body_2 = """
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglueml.transforms import FillMissingValues
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "test-database", table_name = "test_parquet", transformation_ctx = "DataSource0"]
## @return: DataSource0
## @inputs: []
DataSource0 = glueContext.create_dynamic_frame.from_catalog(database = "test-database", table_name = "test_parquet", transformation_ctx = "DataSource0")
## @type: ApplyMapping
## @args: [mappings = [("yr", "int", "yr", "int"), ("quarter", "int", "quarter", "int"), ("month", "int", "month", "int"), ("dayofmonth", "int", "dayofmonth", "int"), ("dayofweek", "int", "dayofweek", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string")], transformation_ctx = "Transform1"]
## @return: Transform1
## @inputs: [frame = DataSource0]
Transform1 = ApplyMapping.apply(frame = DataSource0, mappings = [("yr", "int", "yr", "int"), ("quarter", "int", "quarter", "int"), ("month", "int", "month", "int"), ("dayofmonth", "int", "dayofmonth", "int"), ("dayofweek", "int", "dayofweek", "int"), ("flightdate", "string", "flightdate", "string"), ("uniquecarrier", "string", "uniquecarrier", "string"), ("airlineid", "int", "airlineid", "int"), ("carrier", "string", "carrier", "string")], transformation_ctx = "Transform1")
## @type: FillMissingValues
## @args: [missing_values_column = "dayofmonth", transformation_ctx = "Transform2"]
## @return: Transform2
## @inputs: [frame = Transform1]
Transform2 = FillMissingValues.apply(frame = Transform1, missing_values_column = "dayofmonth", transformation_ctx = "Transform2")
## @type: SelectFields
## @args: [paths = [], transformation_ctx = "Transform3"]
## @return: Transform3
## @inputs: [frame = Transform2]
Transform3 = SelectFields.apply(frame = Transform2, paths = [], transformation_ctx = "Transform3")
## @type: DataSink
## @args: [connection_type = "s3", format = "json", connection_options = {"path": "s3://test-glue-jsons/", "partitionKeys": []}, transformation_ctx = "DataSink0"]
## @return: DataSink0
## @inputs: [frame = Transform3]
DataSink0 = glueContext.write_dynamic_frame.from_options(frame = Transform3, connection_type = "s3", format = "json", connection_options = {"path": "s3://test-glue-jsons/", "partitionKeys": []}, transformation_ctx = "DataSink0")
## @type: SplitFields
## @args: [paths = ["yr", "quarter", "month", "dayofmonth", "dayofweek", "flightdate", "uniquecarrier", "airlineid", "carrier"], name2 = "Transform0Output1", name1 = "Transform0Output0", transformation_ctx = "Transform0"]
## @return: Transform0
## @inputs: [frame = Transform1]
Transform0 = SplitFields.apply(frame = Transform1, paths = ["yr", "quarter", "month", "dayofmonth", "dayofweek", "flightdate", "uniquecarrier", "airlineid", "carrier"], name2 = "Transform0Output1", name1 = "Transform0Output0", transformation_ctx = "Transform0")
job.commit()
"""
def mock_get_object_response(raw_body: str) -> Dict[str, Any]:
"""
Mock s3 client get_object() response object.
See https://gist.github.com/grantcooksey/132ddc85274a50b94b821302649f9d7b
Parameters
----------
raw_body:
Content of the 'Body' field to return
"""
encoded_message = raw_body.encode("utf-8")
raw_stream = StreamingBody(io.BytesIO(encoded_message), len(encoded_message))
return {"Body": raw_stream}
get_object_response_1 = mock_get_object_response(get_object_body_1)
get_object_response_2 = mock_get_object_response(get_object_body_2)
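# A minimal sketch of how these fixtures could be attached to stubbed boto3
# clients in a test. The use of unittest.mock below is an assumption for
# illustration; the real test harness may wire the responses up differently.
def example_stubbed_clients():
    from unittest import mock
    glue_client = mock.Mock()
    glue_client.get_databases.return_value = get_databases_response
    glue_client.get_tables.side_effect = [
        get_tables_response_1,
        get_tables_response_2,
    ]
    glue_client.get_dataflow_graph.side_effect = [
        get_dataflow_graph_response_1,
        get_dataflow_graph_response_2,
    ]
    s3_client = mock.Mock()
    s3_client.get_object.side_effect = [get_object_response_1, get_object_response_2]
    # The stubbed clients now return the canned responses defined above.
    assert glue_client.get_databases()["DatabaseList"][0]["Name"] == "flights-database"
    script = s3_client.get_object()["Body"].read().decode("utf-8")
    assert "glueContext" in script
    return glue_client, s3_client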
import os
import time
import tempfile
from latex import latex
def preview(expr, output='png', viewer=None, euler=True):
"""View expression in PNG, DVI, PostScript or PDF form.
    This will generate a LaTeX representation of the given expression
    and compile it using the available TeX distribution. It will then
    run the appropriate viewer for the given output format, or the
    user-defined one. If you prefer not to use an external viewer, you
    can combine 'png' output with the 'pyglet' viewer. By default, png
    output is generated.
    By default, pretty Euler fonts are used for typesetting (they were
    used to typeset the well-known "Concrete Mathematics" book). For
    that to work, you need the 'eulervm.sty' LaTeX style (on
    Debian/Ubuntu, install the texlive-fonts-extra package). If you
    prefer the default AMS fonts, or your system lacks the 'eulervm'
    LaTeX package, unset the 'euler' keyword argument.
    To use viewer auto-detection, let's say for 'png' output, issue::
>> from sympy import *
>> x, y = symbols("x,y")
>> preview(x + y, output='png')
    This will choose 'pyglet' by default. To select a different one::
>> preview(x + y, output='png', viewer='gimp')
    The 'png' format is considered special. For all other formats the
    rules are slightly different. As an example, take the 'dvi' output
    format. If you run::
    >> preview(x + y, output='dvi')
    then 'preview' will look for available 'dvi' viewers on your system
    (predefined in the function, so it will try evince first, then kdvi
    and xdvi). If nothing is found, you will need to set the viewer
    explicitly::
>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
    This will skip auto-detection and run the user-specified
    'superior-dvi-viewer'. If 'preview' fails to find it on your
    system, it will gracefully raise an exception.
    Currently this depends on pexpect, which is not available for Windows.
"""
    # pexpect is imported lazily because SymPy should not depend on
    # anything outside the standard library by default
import pexpect
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
            # very debatable, but indeed 'gv' looks awful :)
candidates = {
"dvi" : [ "evince", "okular", "kdvi", "xdvi" ],
"ps" : [ "evince", "okular", "gsview", "gv" ],
"pdf" : [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
if pexpect.which(candidate):
viewer = candidate
break
else:
raise SystemError("No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer not in special and not pexpect.which(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if not euler:
format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
else:
format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{eulervm}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
if viewer == "pyglet":
# import pyglet before we change the current dir, because after that it
# would fail:
from sympy.thirdparty import import_thirdparty
pyglet = import_thirdparty("pyglet")
tmp = tempfile.mktemp()
tex = open(tmp + ".tex", "w")
tex.write(format % latex(expr, mode='inline'))
tex.close()
cwd = os.getcwd()
os.chdir(tempfile.gettempdir())
if os.system("latex -halt-on-error %s.tex" % tmp) != 0:
raise SystemError("Failed to generate DVI output.")
os.remove(tmp + ".tex")
os.remove(tmp + ".aux")
os.remove(tmp + ".log")
if output != "dvi":
command = {
"ps" : "dvips -o %s.ps %s.dvi",
"pdf" : "dvipdf %s.dvi %s.pdf",
"png" : "dvipng -T tight -z 9 " + \
"--truecolor -o %s.png %s.dvi",
}
try:
if os.system(command[output] % (tmp, tmp)) != 0:
raise SystemError("Failed to generate '%s' output." % output)
else:
os.remove(tmp + ".dvi")
except KeyError:
raise SystemError("Invalid output format: %s" % output)
src = "%s.%s" % (tmp, output)
if viewer == "pyglet":
from pyglet import window, image, gl
from pyglet.window import key
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(src, decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
win = window.Window(
width = img.width + 2*offset,
height = img.height + 2*offset,
caption = "sympy",
resizable = False
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
os.system("%s %s &> /dev/null &" % (viewer, src))
time.sleep(2) # wait for the viewer to read data
os.remove(src)
os.chdir(cwd)
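# Example usage (a sketch; requires a working TeX distribution and, for the
# default 'png' output, the pyglet viewer):
#
#   from sympy import symbols
#   x, y = symbols("x,y")
#   preview(x**2 + y, output='png')                    # auto-detected pyglet viewer
#   preview(x**2 + y, output='pdf', viewer='evince')   # explicit external viewer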
# Authorize.net gateways
from payment_processor.gateways import GenericGateway
from payment_processor.exceptions import *
import payment_processor.methods
URL_STANDARD = 'https://secure.authorize.net/gateway/transact.dll'
URL_TEST = 'https://test.authorize.net/gateway/transact.dll'
class AuthorizeNet():
gateway = None
def __init__( self, type='AIM', version='3.1', **kwargs ):
if type == 'AIM' and version == '3.1':
self.gateway = AuthorizeNetAIM_3_1( **kwargs )
else:
raise NoGatewayError(
"There is no authorize.net gateway with type '%s' and version '%s'." % ( type, version ) )
def __getattr__( self, value ):
return getattr( self.__dict__['gateway'], value )
class AuthorizeNetAIM_3_1( GenericGateway ):
batch_support = False
url = URL_STANDARD
api = {
## Global ##
'x_delim_data': 'TRUE',
'x_delim_char': '|',
'x_relay_response': 'FALSE',
'x_version': '3.1',
## Instance Specific ##
'x_login': None,
'x_tran_key': None,
'x_test_request': 'FALSE',
        'x_allow_partial_auth': None,
        'x_duplicate_window': None, # Window in seconds (0 to 28800) during which duplicate transactions are rejected
## Transaction Specific ##
'x_type': None, # AUTH_CAPTURE (default), AUTH_ONLY, CAPTURE_ONLY, CREDIT, PRIOR_AUTH_CAPTURE, VOID
'x_method': None, # CC or ECHECK
'x_amount': None,
        'x_recurring_billing': None, # TRUE, FALSE, T, F, YES, NO, Y, N, 1, 0
'x_trans_id': None,
        'x_split_tender_id': None, # The payment gateway-assigned ID used when the original transaction includes two or more partial payments.
'x_auth_code': None, # The authorization code of an original transaction not authorized on the payment gateway
## CC Specific ##
'x_card_num': None,
'x_exp_date': None, # MMYY, MM/YY, MM-YY, MMYYYY, MM/YYYY, MM-YYYY
'x_card_code': None,
'x_authentication_indicator': None,
'x_cardholder_authentication_value': None,
## ECHECK Specific ##
'x_bank_aba_code': None,
'x_bank_acct_num': None,
'x_bank_name': None,
'x_bank_acct_name': None, # CHECKING, BUSINESSCHECKING, SAVINGS
'x_echeck_type': None, # ARC, BOC, CCD, PPD, TEL, WEB
'x_bank_check_number': None,
## Order Information ##
'x_invoice_num': None,
'x_description': None,
'x_line_item': None,
'x_po_num': None,
## Customer Information ##
'x_first_name': None,
'x_last_name': None,
'x_company': None,
'x_address': None,
'x_city': None,
'x_state': None,
'x_zip': None,
'x_country': None,
'x_phone': None,
'x_fax': None,
'x_email': None,
'x_cust_id': None,
'x_customer_ip': None,
## Shipping Information ##
'x_ship_to_first_name': None,
'x_ship_to_last_name': None,
'x_ship_to_company': None,
'x_ship_to_address': None,
'x_ship_to_city': None,
'x_ship_to_state': None,
'x_ship_to_zip': None,
'x_ship_to_country': None,
'x_tax': None,
'x_freight': None,
'x_duty': None,
'x_tax_exempt': None
}
def __init__( self, login=None, trans_key=None, use_test_url=False, enable_test_requests=False, **kwargs ):
GenericGateway.__init__( self, **kwargs )
if not login or not trans_key:
raise TypeError(
"The authorize.net gateway requires both a 'login' and 'trans_key' argument." )
if use_test_url:
self.url = URL_TEST
if enable_test_requests:
self.api['x_test_request'] = 'TRUE'
self.api['x_login'] = login
self.api['x_tran_key'] = trans_key
@GenericGateway.checkTransactionStatus
def process( self, transaction, api=None ):
api = self.newAPI( api )
api['x_type'] = 'AUTH_CAPTURE'
self.populateAPI( transaction, api )
return self.call( transaction, api )
@GenericGateway.checkTransactionStatus
def authorize( self, transaction, api=None ):
api = self.newAPI( api )
api['x_type'] = 'AUTH_ONLY'
self.populateAPI( transaction, api )
return self.call( transaction, api )
@GenericGateway.checkTransactionStatus
def capture( self, transaction, api=None ):
api = self.newAPI( api )
#if auth_code != None:
# api['x_type'] = 'CAPTURE_ONLY'
# api['x_auth_code'] = auth_code
api['x_type'] = 'PRIOR_AUTH_CAPTURE'
api['x_trans_id'] = transaction.trans_id
return self.call( transaction, api )
@GenericGateway.checkTransactionStatus
def void( self, transaction, api=None ):
api = self.newAPI( api )
api['x_type'] = 'VOID'
api['x_trans_id'] = transaction.trans_id
return self.call( transaction, api )
@GenericGateway.checkTransactionStatus
def refund( self, transaction, api=None ):
api = self.newAPI( api )
api['x_type'] = 'CREDIT'
api['x_trans_id'] = transaction.trans_id
self.populateAPI( transaction, api )
return self.call( transaction, api )
def handleResponse( self, transaction ):
response = transaction.last_response.split( self.api['x_delim_char'] )
        #print response
## Response ##
# 0 - Response Code: 1 = Approved, 2 = Declined, 3 = Error, 4 = Held for Review
# 1 - Response Subcode
# 2 - Response Reason Code = http://developer.authorize.net/guides/AIM/Transaction_Response/Response_Reason_Codes_and_Response_Reason_Text.htm
# 3 - Response Reason Text
# 4 - Authorization Code
# 5 - AVS Response
# 6 - Transaction ID
# 7 - Invoice Number
# 8 - Description
# 9 - Amount
# 10 - Method
# 11 - Transaction Type
        # 12 - 23 - Customer ID, First Name, Last Name, Company, Address, City, State, Zip, Country, Phone, Fax, Email
# 24 - 31 - Ship First Name, Last Name, Company, Address, City, State, Zip, Country
# 32 - Tax
# 33 - Duty
# 34 - Freight
# 35 - Tax Exempt
# 36 - Purchase Order Number
# 37 - MD5 Hash
# 38 - CCV Response
# 39 - CAVV Response
# 40 - Account Number
# 41 - Card Type
# 42 - Split Tender ID
# 43 - Requested Amount
# 44 - Balance on Card
response_code = int(response[2])
response_text = response[3] + " (code %s)" % response_code
transaction.last_response_text = response_text
if response[6] != '0':
transaction.trans_id = response[6] # transaction id
## AVS Response Code Values ##
# A = Address (Street) matches, ZIP does not
# B = Address information not provided for AVS check
        # E = AVS error
        # G = Non-U.S. Card Issuing Bank
# N = No Match on Address (Street) or ZIP
# P = AVS not applicable for this transaction
# R = Retry - System unavailable or timed out
# S = Service not supported by issuer
# U = Address information is unavailable
# W = Nine digit ZIP matches, Address (Street) does not
# X = Address (Street) and nine digit ZIP match
# Y = Address (Street) and five digit ZIP match
# Z = Five digit ZIP matches, Address (Street) does not
avs_response = response[5]
# M = Match, N = No Match, P = Not Processed, S = Should have been present, U = Issuer unable to process request
        ccv_response = response[38]
#print response[0], response[2]
if response[0] != '1':
if response_code in ( 6, 37, 200, 315 ):
raise InvalidCardNumber( response_text, response_code=response_code )
if response_code in ( 7, 8, 202, 316, 317 ):
raise InvalidCardExpirationDate( response_text, response_code=response_code )
if response_code in ( 44, 45, 65 ):
raise InvalidCardCode( response_text, response_code=response_code )
if response_code in ( 9, ):
raise InvalidRoutingNumber( response_text, response_code=response_code )
if response_code in ( 10, ):
raise InvalidAccountNumber( response_text, response_code=response_code )
if response_code in ( 27, 127, 290 ):
if avs_response in ( 'A', ):
raise InvalidBillingZipcode( response_text, response_code=response_code, avs_response=avs_response )
raise InvalidBillingAddress( response_text, response_code=response_code, avs_response=avs_response )
if response_code in ( 2, 3, 4, 41, 250, 251 ):
raise TransactionDeclined( response_text, response_code=response_code )
if response_code in ( 11, 222, 318 ):
raise DuplicateTransaction( response_text, response_code=response_code )
#print api
# if response[0] == '2': # Declined
# raise ProcessingDeclined( response[3], error_code=response[2], avs_response=avs_response, ccv_response=ccv_response )
# else: # 3 = Error, 4 = Held for review
raise TransactionFailed( response_text, response_code=response_code )
def populateAPI( self, transaction, api ):
api['x_trans_id'] = transaction.trans_id
api['x_amount'] = transaction.payment.amount
api['x_first_name'] = transaction.method.first_name
api['x_last_name'] = transaction.method.last_name
api['x_company'] = transaction.method.company
api['x_address'] = transaction.method.address
if transaction.method.address2:
api['x_address']+= ', ' + transaction.method.address2
api['x_city'] = transaction.method.city
api['x_state'] = transaction.method.state
api['x_zip'] = transaction.method.zip_code
api['x_country'] = transaction.method.country or api['x_country']
api['x_fax'] = transaction.method.fax
api['x_phone'] = transaction.method.phone
api['x_email'] = transaction.method.email
api['x_customer_ip'] = transaction.payment.ip
api['x_cust_id'] = transaction.payment.customer_id
api['x_invoice_num'] = transaction.payment.order_number
api['x_description'] = transaction.payment.description
api['x_ship_to_first_name'] = transaction.payment.ship_first_name
api['x_ship_to_last_name'] = transaction.payment.ship_last_name
api['x_ship_to_company'] = transaction.payment.ship_company
api['x_ship_to_address'] = transaction.payment.ship_address
if transaction.payment.ship_address2:
api['x_ship_to_address'] += ', ' + transaction.payment.ship_address2
api['x_ship_to_city'] = transaction.payment.ship_city
api['x_ship_to_state'] = transaction.payment.ship_state
api['x_ship_to_zip'] = transaction.payment.ship_zip_code
api['x_ship_to_country'] = transaction.payment.ship_country
if transaction.method.__class__ == payment_processor.methods.CreditCard:
api['x_method'] = 'CC'
api['x_card_num'] = transaction.method.card_number
api['x_exp_date'] = transaction.method.expiration_date.strftime( '%m-%Y' )
api['x_card_code'] = transaction.method.card_code
elif transaction.method.__class__ == payment_processor.methods.Check:
api['x_bank_aba_code'] = transaction.method.routing_number
api['x_bank_acct_num'] = transaction.method.account_number
api['x_bank_name'] = transaction.method.company \
or ( transaction.method.first_name or '' ) \
+ ( ' ' + transaction.method.last_name if transaction.method.last_name else '' )
api['x_bank_acct_name'] = 'CHECKING' if transaction.method.account_type == payment_processor.methods.Check.CHECKING else 'SAVINGS'
if transaction.method.account_holder_type == payment_processor.methods.Check.BUSINESS:
api['x_bank_acct_name'] = 'BUSINESS' + api['x_bank_acct_name']
api['x_echeck_type'] = 'WEB'
api['x_bank_check_number'] = transaction.method.check_number
else:
raise PaymentMethodUnsupportedByGateway(
"Payment Method '%s' is unsupported by authorize.net AIM 3.1 gateway." % transaction.method.__class__.__name__ )
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions used by the bisect tool.
This includes functions related to checking out the depot and outputting
annotations for the Buildbot waterfall.
"""
import errno
import imp
import os
import stat
import subprocess
import sys
DEFAULT_GCLIENT_CUSTOM_DEPS = {
'src/data/page_cycler': 'https://chrome-internal.googlesource.com/'
'chrome/data/page_cycler/.git',
'src/data/dom_perf': 'https://chrome-internal.googlesource.com/'
'chrome/data/dom_perf/.git',
'src/data/mach_ports': 'https://chrome-internal.googlesource.com/'
'chrome/data/mach_ports/.git',
'src/tools/perf/data': 'https://chrome-internal.googlesource.com/'
'chrome/tools/perf/data/.git',
'src/third_party/adobe/flash/binaries/ppapi/linux':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/linux/.git',
'src/third_party/adobe/flash/binaries/ppapi/linux_x64':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/linux_x64/.git',
'src/third_party/adobe/flash/binaries/ppapi/mac':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/mac/.git',
'src/third_party/adobe/flash/binaries/ppapi/mac_64':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/mac_64/.git',
'src/third_party/adobe/flash/binaries/ppapi/win':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/win/.git',
'src/third_party/adobe/flash/binaries/ppapi/win_x64':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/win_x64/.git',
'src/chrome/tools/test/reference_build/chrome_win': None,
'src/chrome/tools/test/reference_build/chrome_mac': None,
'src/chrome/tools/test/reference_build/chrome_linux': None,
'src/third_party/WebKit/LayoutTests': None,
'src/tools/valgrind': None,
}
GCLIENT_SPEC_DATA = [
{
'name': 'src',
'url': 'https://chromium.googlesource.com/chromium/src.git',
'deps_file': '.DEPS.git',
'managed': True,
'custom_deps': {},
'safesync_url': '',
},
]
GCLIENT_SPEC_ANDROID = "\ntarget_os = ['android']"
GCLIENT_CUSTOM_DEPS_V8 = {
'src/v8_bleeding_edge': 'https://chromium.googlesource.com/v8/v8.git'
}
FILE_DEPS_GIT = '.DEPS.git'
FILE_DEPS = 'DEPS'
# Bisect working directory.
BISECT_DIR = 'bisect'
# The percentage at which confidence is considered high.
HIGH_CONFIDENCE = 95
# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
# src: Path to the working directory.
# recurse: True if this repository will get bisected.
# svn: URL of SVN repository. Needed for git workflow to resolve hashes to
# SVN revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in vars variable in DEPS file that has revision
# information.
DEPOT_DEPS_NAME = {
'chromium': {
'src': 'src',
'recurse': True,
'from': ['android-chrome'],
'viewvc': 'https://chromium.googlesource.com/chromium/src/+/',
'deps_var': 'chromium_rev'
},
'webkit': {
'src': 'src/third_party/WebKit',
'recurse': True,
'from': ['chromium'],
'viewvc': 'https://chromium.googlesource.com/chromium/blink/+/',
'deps_var': 'webkit_revision'
},
'angle': {
'src': 'src/third_party/angle',
'src_old': 'src/third_party/angle_dx11',
'recurse': True,
'from': ['chromium'],
'platform': 'nt',
'viewvc': 'https://chromium.googlesource.com/angle/angle/+/',
'deps_var': 'angle_revision'
},
'v8': {
'src': 'src/v8',
'recurse': True,
'from': ['chromium'],
'custom_deps': GCLIENT_CUSTOM_DEPS_V8,
'viewvc': 'https://chromium.googlesource.com/v8/v8.git/+/',
'deps_var': 'v8_revision'
},
'v8_bleeding_edge': {
'src': 'src/v8_bleeding_edge',
'recurse': True,
'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
'from': ['v8'],
'viewvc': 'https://chromium.googlesource.com/v8/v8.git/+/',
'deps_var': 'v8_revision'
},
'skia/src': {
'src': 'src/third_party/skia/src',
'recurse': True,
'from': ['chromium'],
'viewvc': 'https://chromium.googlesource.com/skia/+/',
'deps_var': 'skia_revision'
}
}
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def AddAdditionalDepotInfo(depot_info):
"""Adds additional depot info to the global depot variables."""
global DEPOT_DEPS_NAME
global DEPOT_NAMES
DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
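# Example (a sketch): registering an extra depot so its revision range can also
# be bisected. The 'catapult' entry below is purely illustrative.
#
#   AddAdditionalDepotInfo({
#       'catapult': {
#           'src': 'src/third_party/catapult',
#           'recurse': True,
#           'from': ['chromium'],
#           'deps_var': 'catapult_revision',
#       },
#   })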
def OutputAnnotationStepStart(name):
"""Outputs annotation to signal the start of a step to a try bot.
Args:
name: The name of the step.
"""
print
print '@@@SEED_STEP %s@@@' % name
print '@@@STEP_CURSOR %s@@@' % name
print '@@@STEP_STARTED@@@'
print
sys.stdout.flush()
def OutputAnnotationStepClosed():
"""Outputs annotation to signal the closing of a step to a try bot."""
print
print '@@@STEP_CLOSED@@@'
print
sys.stdout.flush()
def OutputAnnotationStepText(text):
"""Outputs appropriate annotation to print text.
Args:
    text: The text to print.
"""
print
print '@@@STEP_TEXT@%s@@@' % text
print
sys.stdout.flush()
def OutputAnnotationStepWarning():
"""Outputs appropriate annotation to signal a warning."""
print
print '@@@STEP_WARNINGS@@@'
print
def OutputAnnotationStepFailure():
"""Outputs appropriate annotation to signal a warning."""
print
print '@@@STEP_FAILURE@@@'
print
def OutputAnnotationStepLink(label, url):
"""Outputs appropriate annotation to print a link.
Args:
label: The name to print.
url: The URL to print.
"""
print
print '@@@STEP_LINK@%s@%s@@@' % (label, url)
print
sys.stdout.flush()
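# Example (a sketch): wrapping a bisect step in buildbot annotations, mirroring
# the pattern SetupGitDepot uses further below.
#
#   OutputAnnotationStepStart('Gathering Reference Values')
#   try:
#     pass  # run the step; report details with OutputAnnotationStepText/StepLink
#   finally:
#     OutputAnnotationStepClosed()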
def LoadExtraSrc(path_to_file):
"""Attempts to load an extra source file, and overrides global values.
If the extra source file is loaded successfully, then it will use the new
module to override some global values, such as gclient spec data.
Args:
path_to_file: File path.
Returns:
The loaded module object, or None if none was imported.
"""
try:
global GCLIENT_SPEC_DATA
global GCLIENT_SPEC_ANDROID
extra_src = imp.load_source('data', path_to_file)
GCLIENT_SPEC_DATA = extra_src.GetGClientSpec()
GCLIENT_SPEC_ANDROID = extra_src.GetGClientSpecExtraParams()
return extra_src
except ImportError:
return None
def IsTelemetryCommand(command):
"""Attempts to discern whether or not a given command is running telemetry."""
return 'tools/perf/run_' in command or 'tools\\perf\\run_' in command
def _CreateAndChangeToSourceDirectory(working_directory):
"""Creates a directory 'bisect' as a subdirectory of |working_directory|.
If successful, the current working directory will be changed to the new
'bisect' directory.
Args:
working_directory: The directory to create the new 'bisect' directory in.
Returns:
True if the directory was successfully created (or already existed).
"""
cwd = os.getcwd()
os.chdir(working_directory)
try:
os.mkdir(BISECT_DIR)
except OSError, e:
if e.errno != errno.EEXIST: # EEXIST indicates that it already exists.
os.chdir(cwd)
return False
os.chdir(BISECT_DIR)
return True
def _SubprocessCall(cmd, cwd=None):
"""Runs a command in a subprocess.
Args:
cmd: The command to run.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
if os.name == 'nt':
# "HOME" isn't normally defined on windows, but is needed
# for git to find the user's .netrc file.
if not os.getenv('HOME'):
os.environ['HOME'] = os.environ['USERPROFILE']
shell = os.name == 'nt'
return subprocess.call(cmd, shell=shell, cwd=cwd)
def RunGClient(params, cwd=None):
"""Runs gclient with the specified parameters.
Args:
params: A list of parameters to pass to gclient.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
cmd = ['gclient'] + params
return _SubprocessCall(cmd, cwd=cwd)
def RunGClientAndCreateConfig(opts, custom_deps=None, cwd=None):
"""Runs gclient and creates a config containing both src and src-internal.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
spec = GCLIENT_SPEC_DATA
if custom_deps:
for k, v in custom_deps.iteritems():
spec[0]['custom_deps'][k] = v
  # Cannot have newlines in the spec string on Windows.
spec = 'solutions =' + str(spec)
spec = ''.join([l for l in spec.splitlines()])
if 'android' in opts.target_platform:
spec += GCLIENT_SPEC_ANDROID
return_code = RunGClient(
['config', '--spec=%s' % spec], cwd=cwd)
return return_code
def OnAccessError(func, path, _):
"""Error handler for shutil.rmtree.
Source: http://goo.gl/DEYNCT
If the error is due to an access error (read only file), it attempts to add
write permissions, then retries.
If the error is for another reason it re-raises the error.
Args:
func: The function that raised the error.
path: The path name passed to func.
_: Exception information from sys.exc_info(). Not used.
"""
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
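# Example (a sketch): removing a checkout that may contain read-only files,
# retrying on access errors via the handler above.
#
#   import shutil
#   shutil.rmtree(path_to_old_checkout, onerror=OnAccessError)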
def _CleanupPreviousGitRuns(cwd=None):
  """Cleans up any leftover index.lock files after running git."""
  if cwd is None:
    cwd = os.getcwd()
# If a previous run of git crashed, or bot was reset, etc., then we might
# end up with leftover index.lock files.
for path, _, files in os.walk(cwd):
for cur_file in files:
if cur_file.endswith('index.lock'):
path_to_file = os.path.join(path, cur_file)
os.remove(path_to_file)
def RunGClientAndSync(revisions=None, cwd=None):
"""Runs gclient and does a normal sync.
Args:
revisions: List of revisions that need to be synced.
E.g., "src@2ae43f...", "src/third_party/webkit@asr1234" etc.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
params = ['sync', '--verbose', '--nohooks', '--force',
'--delete_unversioned_trees']
if revisions is not None:
for revision in revisions:
if revision is not None:
params.extend(['--revision', revision])
return RunGClient(params, cwd=cwd)
def SetupGitDepot(opts, custom_deps):
"""Sets up the depot for the bisection.
The depot will be located in a subdirectory called 'bisect'.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
Returns:
True if gclient successfully created the config file and did a sync, False
otherwise.
"""
name = 'Setting up Bisection Depot'
try:
if opts.output_buildbot_annotations:
OutputAnnotationStepStart(name)
if RunGClientAndCreateConfig(opts, custom_deps):
return False
_CleanupPreviousGitRuns()
RunGClient(['revert'])
return not RunGClientAndSync()
finally:
if opts.output_buildbot_annotations:
OutputAnnotationStepClosed()
def CheckIfBisectDepotExists(opts):
"""Checks if the bisect directory already exists.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
Returns True if it exists.
"""
path_to_dir = os.path.join(opts.working_directory, BISECT_DIR, 'src')
return os.path.exists(path_to_dir)
def CheckRunGit(command, cwd=None):
"""Run a git subcommand, returning its output and return code. Asserts if
the return code of the call is non-zero.
Args:
command: A list containing the args to git.
Returns:
A tuple of the output and return code.
"""
output, return_code = RunGit(command, cwd=cwd)
assert not return_code, 'An error occurred while running'\
' "git %s"' % ' '.join(command)
return output
def RunGit(command, cwd=None):
"""Run a git subcommand, returning its output and return code.
Args:
command: A list containing the args to git.
cwd: A directory to change to while running the git command (optional).
Returns:
A tuple of the output and return code.
"""
command = ['git'] + command
return RunProcessAndRetrieveOutput(command, cwd=cwd)
def CreateBisectDirectoryAndSetupDepot(opts, custom_deps):
"""Sets up a subdirectory 'bisect' and then retrieves a copy of the depot
there using gclient.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
"""
if CheckIfBisectDepotExists(opts):
path_to_dir = os.path.join(os.path.abspath(opts.working_directory),
BISECT_DIR, 'src')
output, _ = RunGit(['rev-parse', '--is-inside-work-tree'], cwd=path_to_dir)
if output.strip() == 'true':
# Before checking out master, clean up any leftover index.lock files.
_CleanupPreviousGitRuns(path_to_dir)
# Checks out the master branch, throws an exception if git command fails.
CheckRunGit(['checkout', '-f', 'master'], cwd=path_to_dir)
if not _CreateAndChangeToSourceDirectory(opts.working_directory):
raise RuntimeError('Could not create bisect directory.')
if not SetupGitDepot(opts, custom_deps):
raise RuntimeError('Failed to grab source.')
def RunProcess(command):
"""Runs an arbitrary command.
If output from the call is needed, use RunProcessAndRetrieveOutput instead.
Args:
command: A list containing the command and args to execute.
Returns:
The return code of the call.
"""
# On Windows, use shell=True to get PATH interpretation.
shell = IsWindowsHost()
return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
"""Runs an arbitrary command, returning its output and return code.
Since output is collected via communicate(), no output is available until
the call terminates. If you need output while the program runs (i.e. so
that the buildbot doesn't terminate the script), consider RunProcess().
Args:
command: A list containing the command and args to execute.
cwd: A directory to change to while running the command. The command can be
relative to this directory. If this is None, the command will be run in
the current directory.
Returns:
A tuple of the output and return code.
"""
if cwd:
original_cwd = os.getcwd()
os.chdir(cwd)
# On Windows, use shell=True to get PATH interpretation.
shell = IsWindowsHost()
proc = subprocess.Popen(
command, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = proc.communicate()
if cwd:
os.chdir(original_cwd)
return (output, proc.returncode)
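# Typical usage (a minimal sketch):
#   output, return_code = RunProcessAndRetrieveOutput(['git', 'status'], cwd='src')
# Note that stderr is folded into stdout via stderr=subprocess.STDOUT, so the
# returned output contains both streams.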
def IsStringInt(string_to_check):
"""Checks whether or not the given string can be converted to an int."""
try:
int(string_to_check)
return True
except ValueError:
return False
def IsStringFloat(string_to_check):
"""Checks whether or not the given string can be converted to a float."""
try:
float(string_to_check)
return True
except ValueError:
return False
def IsWindowsHost():
"""Checks whether the script is running on Windows (including cygwin)."""
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
"""Checks whether or not Windows is a 64-bit version."""
platform = os.environ.get('PROCESSOR_ARCHITEW6432')
if not platform:
# Must not be running in WoW64, so PROCESSOR_ARCHITECTURE is correct.
platform = os.environ.get('PROCESSOR_ARCHITECTURE')
return platform and platform in ['AMD64', 'I64']
def IsLinuxHost():
"""Checks whether the script is running on Linux."""
return sys.platform.startswith('linux')
def IsMacHost():
"""Checks whether the script is running on macOS."""
return sys.platform.startswith('darwin')
|
|
# -*- coding: utf-8 -*-
import six
from girder.exceptions import ValidationException
from girder.models.folder import Folder
from girder.models.setting import Setting
from girder.models.user import User
from tests import base
from girder_item_licenses.settings import PluginSettings
def setUpModule():
base.enabledPlugins.append('item_licenses')
base.startServer()
def tearDownModule():
base.stopServer()
class ItemLicensesTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
# Create a user
user = {
'email': '[email protected]',
'login': 'user1login',
'firstName': 'First',
'lastName': 'Last',
'password': 'user1password',
'admin': False
}
self.user = User().createUser(**user)
# Get user's private folder
folders = Folder().childFolders(self.user, 'user', user=self.user)
for folder in folders:
if folder['name'] == 'Private':
self.folder = folder
break
def testItemCreateInvalid(self):
"""
Test creating items with invalid licenses.
"""
# Create item with a null license
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': None
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
# Create item with an invalid license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': 'Unsupported license'
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
# Create item with a valid license name with extra whitespace
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': ' The MIT License (MIT) '
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
def testItemCreateAndUpdate(self):
"""
Test creating, reading, and updating an item, especially with regard to
its license field.
"""
# Create item without specifying a license
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Create item with a blank license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': ''
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Update item license
params = {
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Update item license to be unspecified
params = {
'license': ''
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Create item with a valid license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': 'The MIT License (MIT)'
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'The MIT License (MIT)')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'The MIT License (MIT)')
# Update item
params = {
'name': 'changed name',
'description': 'new description',
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Update item with the same license name
params = {
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
def testItemCopy(self):
"""
Test copying an item, especially with regard to its license field.
"""
params = {
'name': 'original item',
'description': 'original description',
'license': 'The MIT License (MIT)',
'folderId': self.folder['_id']
}
# Create item
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
origItemId = resp.json['_id']
# Copy to a new item with different name and license.
params = {
'name': 'new item',
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s/copy' % origItemId,
method='POST', user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
def testGetLicenses(self):
"""
Test getting list of licenses.
"""
# Get default settings
resp = self.request(path='/item/licenses', user=self.user, params={
'default': True
})
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Get current settings
resp = self.request(path='/item/licenses', user=self.user)
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Change licenses
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}]},
{'category': 'B', 'licenses': [{'name': '2'}, {'name': '3'}]}])
# Get default settings after changing licenses
resp = self.request(path='/item/licenses', user=self.user, params={
'default': True
})
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Get current settings after changing licenses
resp = self.request(path='/item/licenses', user=self.user)
self.assertStatusOk(resp)
six.assertCountEqual(
self, resp.json,
[{'category': 'A', 'licenses': [{'name': '1'}]},
{'category': 'B', 'licenses': [{'name': '2'}, {'name': '3'}]}])
def testLicensesSettingValidation(self):
"""
Test validation of licenses setting.
"""
# Test valid settings
Setting().set(
PluginSettings.LICENSES,
[])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}, {'name': '2'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []},
{'category': 'B', 'licenses': [{'name': '1'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []},
{'category': 'B', 'licenses': [{'name': '1'}, {'name': '2'}]}])
# Test invalid top-level types
for val in (None, 1, '', {}, [{}]):
self.assertRaises(ValidationException, Setting().set, PluginSettings.LICENSES, val)
# Test invalid category types
for category, licenses in ((None, []), (1, []), ('', []), ({}, [])):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': category, 'licenses': licenses}])
# Test invalid licenses types
for val in (None, {}, [1], ['']):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': val}])
# Test invalid license names
for val in (None, 1, '', {}, []):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': val}]}])
|
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# This script runs regression tests for packages that other packages list as a required dependency.
# Regression testing ensures backward compatibility with released dependent package versions.
import argparse
import glob
import sys
import os
import logging
from common_tasks import (
process_glob_string,
parse_setup,
run_check_call,
parse_require,
install_package_from_whl,
filter_dev_requirements,
find_packages_missing_on_pypi,
find_whl,
find_tools_packages,
get_installed_packages,
extend_dev_requirements,
str_to_bool,
)
from git_helper import (
get_release_tag,
git_checkout_tag,
git_checkout_branch,
clone_repo,
)
AZURE_GLOB_STRING = "azure*"
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
test_tools_req_file = os.path.abspath(os.path.join(root_dir, "eng", "regression_test_tools.txt"))
GIT_REPO_NAME = "azure-sdk-for-python"
GIT_MASTER_BRANCH = "main"
VENV_NAME = "regressionenv"
AZURE_SDK_FOR_PYTHON_GIT_URL = "https://github.com/Azure/azure-sdk-for-python.git"
TEMP_FOLDER_NAME = ".tmp_code_path"
OLDEST_EXTENSION_PKGS = ["msrestazure", "adal"]
logging.getLogger().setLevel(logging.INFO)
class CustomVirtualEnv:
def __init__(self, path):
self.path = os.path.join(path, VENV_NAME)
def create(self):
logging.info("Creating virtual environment [{}]".format(self.path))
run_check_call([sys.executable, "-m", "venv", self.path], root_dir)
self.python_executable = self._find_python_executable()
self.lib_paths = self._find_lib_paths()
def clear_venv(self):
# Clear any previously installed packages by recreating the environment.
run_check_call([sys.executable, "-m", "venv", "--clear", self.path], root_dir)
def _find_python_executable(self):
paths = glob.glob(os.path.join(self.path, "*", "python")) + glob.glob(
os.path.join(self.path, "*", "python.exe")
)
if not paths:
logging.error("Failed to find path to python executable in virtual env:{}".format(self.path))
sys.exit(1)
return paths[0]
def _find_lib_paths(self):
paths = glob.glob(os.path.join(self.path, "*", "site-packages")) + glob.glob(
os.path.join(self.path, "lib", "*", "site-packages")
)
if not paths:
logging.error("Failed to find site-packages directory in virtual env:{}".format(self.path))
sys.exit(1)
return paths
class RegressionContext:
def __init__(self, whl_dir, tmp_path, is_latest, pytest_mark_arg):
self.whl_directory = whl_dir
self.temp_path = tmp_path
self.is_latest_depend_test = is_latest
self.venv = CustomVirtualEnv(self.temp_path)
self.pytest_mark_arg = pytest_mark_arg
self.venv.create()
def init_for_pkg(self, pkg_root):
# This method is called each time the context is switched to test regression for a new package.
self.package_root_path = pkg_root
self.package_name, self.pkg_version, _, _ = parse_setup(self.package_root_path)
def initialize(self, dep_pkg_root_path):
self.dep_pkg_root_path = dep_pkg_root_path
self.venv.clear_venv()
def deinitialize(self, dep_pkg_root_path):
# Reset the dependent package's repo to a clean checkout of the default branch.
run_check_call(["git", "clean", "-fd"], dep_pkg_root_path)
run_check_call(["git", "checkout", GIT_MASTER_BRANCH], dep_pkg_root_path)
class RegressionTest:
def __init__(self, context, package_dependency_dict):
self.context = context
self.package_dependency_dict = package_dependency_dict
def run(self):
pkg_name = self.context.package_name
if pkg_name in self.package_dependency_dict:
logging.info("Running regression test for {}".format(pkg_name))
self.whl_path = os.path.join(
self.context.whl_directory,
find_whl(pkg_name, self.context.pkg_version, self.context.whl_directory),
)
if find_packages_missing_on_pypi(self.whl_path):
logging.error("Required packages are not available on PyPI. Skipping regression test")
sys.exit(0)
dep_packages = self.package_dependency_dict[pkg_name]
logging.info("Dependent packages for [{0}]: {1}".format(pkg_name, dep_packages))
for dep_pkg_path in dep_packages:
dep_pkg_name, _, _, _ = parse_setup(dep_pkg_path)
logging.info("Starting regression test of {0} against released {1}".format(pkg_name, dep_pkg_name))
self._run_test(dep_pkg_path)
logging.info("Completed regression test of {0} against released {1}".format(pkg_name, dep_pkg_name))
logging.info("Completed regression test for {}".format(pkg_name))
else:
logging.info("Package {} is not added as required by any package".format(pkg_name))
def _run_test(self, dep_pkg_path):
self.context.initialize(dep_pkg_path)
# find GA released tags for package and run test using that code base
dep_pkg_name, version, _, _ = parse_setup(dep_pkg_path)
release_tag = get_release_tag(dep_pkg_name, self.context.is_latest_depend_test)
if not release_tag:
logging.error("Release tag is not available. Skipping package {} from test".format(dep_pkg_name))
return
test_branch_name = "{0}_tests".format(release_tag)
try:
git_checkout_branch(test_branch_name, dep_pkg_path)
except Exception:
# If checking out the "tests" branch failed, fall back to the release tag.
logging.info("Failed to checkout branch {}. Checking out release tagged git repo".format(test_branch_name))
git_checkout_tag(release_tag, dep_pkg_path)
try:
# install packages required to run tests
run_check_call(
[
self.context.venv.python_executable,
"-m",
"pip",
"install",
"-r",
test_tools_req_file,
"--extra-index-url",
"https://pypi.org/simple",
],
dep_pkg_path,
)
# Install pre-built whl for current package.
install_package_from_whl(
self.whl_path,
self.context.temp_path,
self.context.venv.python_executable,
)
# install dependent package from source
self._install_packages(dep_pkg_path, self.context.package_name)
# Try installing the pre-built whl for the current package again; if it is
# unnecessary, pip does nothing. This ensures the correct development version
# is installed; on non-dev builds this step is effectively a no-op.
install_package_from_whl(
self.whl_path,
self.context.temp_path,
self.context.venv.python_executable,
)
self._execute_test(dep_pkg_path)
finally:
self.context.deinitialize(dep_pkg_path)
def _execute_test(self, dep_pkg_path):
# Ensure correct version of package is installed
if not self._is_package_installed(self.context.package_name, self.context.pkg_version):
logging.error(
"Incorrect version of package {0} is installed. Expected version {1}".format(
self.context.package_name, self.context.pkg_version
)
)
sys.exit(1)
logging.info("Running test for {}".format(dep_pkg_path))
commands = [
self.context.venv.python_executable,
"-m",
"pytest",
"--verbose",
"--durations",
"10",
]
# Add any pytest mark argument if present, e.g. 'not cosmosEmulator'.
if self.context.pytest_mark_arg:
commands.extend(["-m", self.context.pytest_mark_arg])
test_dir = self._get_package_test_dir(dep_pkg_path)
if test_dir:
commands.append(test_dir)
run_check_call(commands, self.context.temp_path)
else:
logging.info(
"Test directory is not found in package root. Skipping {} from regression test.".format(
self.context.package_name
)
)
def _get_package_test_dir(self, pkg_root_path):
# Returns path to test or tests folder within package root directory.
paths = glob.glob(os.path.join(pkg_root_path, "test")) + glob.glob(os.path.join(pkg_root_path, "tests"))
if not paths:
# We will run into this situation only if both 'test' and 'tests' are missing from the repo.
# For now, run tests from the package root itself to match regular CI in such cases.
logging.error("'test' folder is not found in {}".format(pkg_root_path))
return
return paths[0]
def _install_packages(self, dependent_pkg_path, pkg_to_exclude):
python_executable = self.context.venv.python_executable
working_dir = self.context.package_root_path
temp_dir = self.context.temp_path
list_to_exclude = [pkg_to_exclude, "azure-sdk-tools", "azure-devtools"]
installed_pkgs = [
p.split("==")[0] for p in get_installed_packages(self.context.venv.lib_paths) if p.startswith("azure-")
]
logging.info("Installed azure sdk packages:{}".format(installed_pkgs))
# Do not exclude packages from the tools directory, so that these tools packages are reinstalled from the repo branch under test.
root_path = os.path.abspath(os.path.join(dependent_pkg_path, "..", "..", ".."))
tools_packages = find_tools_packages(root_path)
installed_pkgs = [req for req in installed_pkgs if req not in tools_packages]
list_to_exclude.extend(installed_pkgs)
# Install dev requirements, but skip the package under test and any packages that are already installed.
filtered_dev_req_path = filter_dev_requirements(dependent_pkg_path, list_to_exclude, dependent_pkg_path)
# Early versions of azure-sdk-tools had an unpinned version of azure-mgmt packages.
# That unpinned version hits a code path in azure-sdk-tools that triggers this error.
if filtered_dev_req_path and not self.context.is_latest_depend_test:
logging.info("Extending dev requirements with {}".format(OLDEST_EXTENSION_PKGS))
extend_dev_requirements(filtered_dev_req_path, OLDEST_EXTENSION_PKGS)
else:
logging.info(
"Not extending dev requirements {} {}".format(filtered_dev_req_path, self.context.is_latest_depend_test)
)
if filtered_dev_req_path:
logging.info("Extending dev requirement to include azure-sdk-tools")
extend_dev_requirements(
filtered_dev_req_path,
["../../../tools/azure-sdk-tools", "../../../tools/azure-devtools"],
)
logging.info("Installing filtered dev requirements from {}".format(filtered_dev_req_path))
run_check_call(
[
python_executable,
"-m",
"pip",
"install",
"-r",
filtered_dev_req_path,
"--extra-index-url",
"https://pypi.org/simple",
],
dependent_pkg_path,
)
else:
logging.info("dev requirements is not found to install")
# install dependent package which is being verified
run_check_call([python_executable, "-m", "pip", "install", dependent_pkg_path, "--extra-index-url", "https://pypi.org/simple"], temp_dir)
def _is_package_installed(self, package, version):
# Find the venv root and package locations.
venv_root = self.context.venv.path
site_packages = self.context.venv.lib_paths
logging.info("Searching for packages in :{}".format(site_packages))
installed_pkgs = get_installed_packages(site_packages)
logging.info("Installed packages: {}".format(installed_pkgs))
# Verify installed package version
# Search for exact version or alpha build version of current version.
pkg_search_string = "{0}=={1}".format(package, version)
alpha_build_search_string = "{0}=={1}a".format(package, version)
return any(p == pkg_search_string or p.startswith(alpha_build_search_string) for p in installed_pkgs)
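# For example (version values illustrative), with package="azure-core" and
# version="1.2.0" this matches an installed "azure-core==1.2.0" exactly, and
# also an alpha build such as "azure-core==1.2.0a20200101001" through the
# startswith() check.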
# This function builds the package dependency map for all packages in the azure sdk repo.
def find_package_dependency(glob_string, repo_root_dir):
package_paths = process_glob_string(glob_string, repo_root_dir, "", "Regression")
dependency_map = {}
for pkg_root in package_paths:
_, _, _, requires = parse_setup(pkg_root)
# Get a list of package names from install requires
required_pkgs = [parse_require(r)[0] for r in requires]
required_pkgs = [p for p in required_pkgs if p.startswith("azure")]
for req_pkg in required_pkgs:
if req_pkg not in dependency_map:
dependency_map[req_pkg] = []
dependency_map[req_pkg].append(pkg_root)
logging.info("Package dependency: {}".format(dependency_map))
return dependency_map
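# The returned map is keyed by required package name and lists the package
# roots that depend on it, e.g. (paths illustrative):
#   {"azure-core": ["<repo>/sdk/identity/azure-identity", ...]}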
# This is the main function: it identifies packages to test, builds the dependency map, and triggers the tests.
def run_main(args):
temp_dir = ""
if args.temp_dir:
temp_dir = args.temp_dir
else:
temp_dir = os.path.abspath(os.path.join(root_dir, "..", TEMP_FOLDER_NAME))
code_repo_root = os.path.join(temp_dir, GIT_REPO_NAME)
# Make sure root_dir, where the script is running, is not the same as the code repo, which will be reverted to an old released branch to run tests.
if root_dir == code_repo_root:
logging.error(
"Invalid path to clone github code repo. Temporary path can not be same as current source root directory"
)
sys.exit(1)
# Make sure temp path exists
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
if args.service:
service_dir = os.path.join("sdk", args.service)
target_dir = os.path.join(root_dir, service_dir)
else:
target_dir = root_dir
targeted_packages = process_glob_string(args.glob_string, target_dir, "", "Regression")
if len(targeted_packages) == 0:
sys.exit(0)
# clone code repo only if it doesn't exist
if not os.path.exists(code_repo_root):
clone_repo(temp_dir, AZURE_SDK_FOR_PYTHON_GIT_URL)
else:
logging.info("Path {} already exists. Skipping step to clone github repo".format(code_repo_root))
# find package dependency map for azure sdk
pkg_dependency = find_package_dependency(AZURE_GLOB_STRING, code_repo_root)
# Create the regression test context. One context object is reused for all packages.
context = RegressionContext(args.whl_dir, temp_dir, str_to_bool(args.verify_latest), args.mark_arg)
for pkg_path in targeted_packages:
context.init_for_pkg(pkg_path)
RegressionTest(context, pkg_dependency).run()
logging.info("Regression test is completed successfully")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run regression test for a package against released dependent packages"
)
parser.add_argument(
"glob_string",
nargs="?",
help=(
"A comma separated list of glob strings that will target the top level directories that contain packages."
'Examples: All = "azure*", Single = "azure-keyvault", Targeted Multiple = "azure-keyvault,azure-mgmt-resource"'
),
)
parser.add_argument(
"--service",
help=("Name of service directory (under sdk/) to test." "Example: --service applicationinsights"),
)
parser.add_argument(
"--whl-dir",
required=True,
help=("Directory in which whl is pre built for all eligible package"),
)
parser.add_argument(
"--verify-latest",
default=True,
help=(
"Set this parameter to true to verify regression against latest released version."
"Default behavior is to test regression for oldest released version of dependent packages"
),
)
parser.add_argument(
"--temp-dir",
help=(
"Temporary path to clone github repo of azure-sdk-for-python to run tests. Any changes in this path will be overwritten"
),
)
parser.add_argument(
"--mark-arg",
dest="mark_arg",
help=(
'The complete argument for `pytest -m "<input>"`. This can be used to exclude or include specific pytest markers, '
'e.g. --mark_arg="not cosmosEmulator"'
),
)
args = parser.parse_args()
run_main(args)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
import re
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
# TODO(JLH) Should we remove the explicit include of the ovs plugin here
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name =" +
self.switch.br_name)
class BaseOVS(object):
def __init__(self, root_helper):
self.root_helper = root_helper
def run_vsctl(self, args, check_error=False):
full_args = ["ovs-vsctl", "--timeout=2"] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
if check_error:
raise
def add_bridge(self, bridge_name):
self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
return OVSBridge(bridge_name, self.root_helper)
def delete_bridge(self, bridge_name):
self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
def bridge_exists(self, bridge_name):
try:
self.run_vsctl(['br-exists', bridge_name], check_error=True)
except RuntimeError as e:
if 'Exit code: 2\n' in str(e):
return False
raise
return True
def get_bridge_name_for_port_name(self, port_name):
try:
return self.run_vsctl(['port-to-br', port_name], check_error=True)
except RuntimeError as e:
if 'Exit code: 1\n' not in str(e):
raise
def port_exists(self, port_name):
return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
def __init__(self, br_name, root_helper):
super(OVSBridge, self).__init__(root_helper)
self.br_name = br_name
self.re_id = self.re_compile_id()
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def re_compile_id(self):
external = r'external_ids\s*'
mac = r'attached-mac="(?P<vif_mac>([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"'
iface = r'iface-id="(?P<vif_id>[^"]+)"'
name = r'name\s*:\s"(?P<port_name>[^"]*)"'
port = r'ofport\s*:\s(?P<ofport>-?\d+)'
_re = (r'%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }'
r' \s+ %(name)s \s+ %(port)s' % {'external': external,
'mac': mac,
'iface': iface, 'name': name,
'port': port})
return re.compile(_re, re.M | re.X)
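# The compiled pattern is matched against "ovs-vsctl ... find Interface"
# output records that look roughly like (values illustrative):
#   external_ids        : {attached-mac="fa:16:3e:01:02:03", iface-id="<port-uuid>"}
#   name                : "tap0"
#   ofport              : 1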
def create(self):
self.add_bridge(self.br_name)
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self):
self.destroy()
self.create()
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
return self.db_get_val("Interface", port_name, "ofport")
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def _build_flow_expr_arr(self, **kwargs):
flow_expr_arr = []
is_delete_expr = kwargs.get('delete', False)
if not is_delete_expr:
prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" %
(kwargs.get('hard_timeout', '0'),
kwargs.get('idle_timeout', '0'),
kwargs.get('priority', '1')))
flow_expr_arr.append(prefix)
elif 'priority' in kwargs:
raise Exception(_("Cannot match priority on flow deletion"))
table = ('table' in kwargs and ",table=%s" %
kwargs['table'] or '')
in_port = ('in_port' in kwargs and ",in_port=%s" %
kwargs['in_port'] or '')
dl_type = ('dl_type' in kwargs and ",dl_type=%s" %
kwargs['dl_type'] or '')
dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" %
kwargs['dl_vlan'] or '')
dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or ''
proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
match = (table + in_port + dl_type + dl_vlan + dl_src + dl_dst +
(proto or ip) + nw_src + nw_dst + tun_id)
if match:
match = match[1:] # strip leading comma
flow_expr_arr.append(match)
return flow_expr_arr
def add_or_mod_flow_str(self, **kwargs):
if "actions" not in kwargs:
raise Exception(_("Must specify one or more actions"))
if "priority" not in kwargs:
kwargs["priority"] = "0"
flow_expr_arr = self._build_flow_expr_arr(**kwargs)
flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
flow_str = ",".join(flow_expr_arr)
return flow_str
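# As a small example, add_flow(priority=2, in_port=1, actions="drop") builds
# "hard_timeout=0,idle_timeout=0,priority=2,in_port=1,actions=drop", which is
# then passed to "ovs-ofctl add-flow" (or buffered while flow application is
# deferred).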
def add_flow(self, **kwargs):
flow_str = self.add_or_mod_flow_str(**kwargs)
if self.defer_apply_flows:
self.deferred_flows['add'] += flow_str + '\n'
else:
self.run_ofctl("add-flow", [flow_str])
def mod_flow(self, **kwargs):
flow_str = self.add_or_mod_flow_str(**kwargs)
if self.defer_apply_flows:
self.deferred_flows['mod'] += flow_str + '\n'
else:
self.run_ofctl("mod-flows", [flow_str])
def delete_flows(self, **kwargs):
kwargs['delete'] = True
flow_expr_arr = self._build_flow_expr_arr(**kwargs)
if "actions" in kwargs:
flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
flow_str = ",".join(flow_expr_arr)
if self.defer_apply_flows:
self.deferred_flows['del'] += flow_str + '\n'
else:
self.run_ofctl("del-flows", [flow_str])
def defer_apply_on(self):
LOG.debug(_('defer_apply_on'))
self.defer_apply_flows = True
def defer_apply_off(self):
LOG.debug(_('defer_apply_off'))
for action, flows in self.deferred_flows.items():
if flows:
LOG.debug(_('Applying following deferred flows '
'to bridge %s'), self.br_name)
for line in flows.splitlines():
LOG.debug(_('%(action)s: %(flow)s'),
{'action': action, 'flow': line})
self.run_ofctl('%s-flows' % action, ['-'], flows)
self.defer_apply_flows = False
self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=p_const.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT):
vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
port_name]
vsctl_command.extend(["--", "set", "Interface", port_name,
"type=%s" % tunnel_type])
if tunnel_type == p_const.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
"options:local_ip=%s" % local_ip,
"options:in_key=flow",
"options:out_key=flow"])
self.run_vsctl(vsctl_command)
return self.get_port_ofport(port_name)
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name,
"--", "set", "Interface", local_name,
"type=patch", "options:peer=%s" % remote_name])
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column):
output = self.run_vsctl(["get", table, record, column])
if output:
output_str = output.rstrip("\n\r")
return self.db_str_to_map(output_str)
return {}
def db_get_val(self, table, record, column):
output = self.run_vsctl(["get", table, record, column])
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
list = full_str.strip("{}").split(", ")
ret = {}
for e in list:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
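# For example, a map column rendered by ovs-vsctl as '{rx_bytes=648, rx_packets=8}'
# (values illustrative) is parsed into {'rx_bytes': '648', 'rx_packets': '8'};
# note that values stay strings.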
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name])
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids")
ofport = self.db_get_val("Interface", name, "ofport")
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids',
'list', 'Interface']
result = self.run_vsctl(args)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
if "iface-id" in external_ids and "attached-mac" in external_ids:
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
return edge_ports
def get_vif_port_by_id(self, port_id):
args = ['--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
match = self.re_id.search(result)
try:
vif_mac = match.group('vif_mac')
vif_id = match.group('vif_id')
port_name = match.group('port_name')
ofport = int(match.group('ofport'))
return VifPort(port_name, ofport, vif_id, vif_mac, self)
except Exception as e:
LOG.info(_("Unable to parse regex results. Exception: %s"), e)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=2", "iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=2", "list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
return []
def get_installed_ovs_usr_version(root_helper):
args = ["ovs-vsctl", "--version"]
try:
cmd = utils.execute(args, root_helper=root_helper)
ver = re.findall(r"\d+\.\d+", cmd)[0]
return ver
except Exception:
LOG.exception(_("Unable to retrieve OVS userspace version."))
def get_installed_ovs_klm_version():
args = ["modinfo", "openvswitch"]
try:
cmd = utils.execute(args)
for line in cmd.split('\n'):
if 'version: ' in line and 'srcversion' not in line:
ver = re.findall(r"\d+\.\d+", line)
return ver[0]
except Exception:
LOG.exception(_("Unable to retrieve OVS kernel module version."))
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
|
|
import numpy as np
import pandas as pd
from scipy import integrate, optimize, special, stats
import statsmodels as sm
from . import results
class Distribution:
@staticmethod
def _clean_data(series):
"""Remove any non-positive, null, or NAN observations."""
return series[series > 0].dropna().sort_values()
@staticmethod
def _density_function(x, *args, **kwargs):
raise NotImplementedError
@staticmethod
def _ecdf(series):
"""Empirical cumulative distribution function (ECDF)."""
return sm.distributions.ECDF(series)
@staticmethod
def _normalization_constant(xmin, *args, **kwargs):
raise NotImplementedError
@staticmethod
def _quantile(x, xmin, *args, **kwargs):
"""Inverse of the cumulative distibution function (CDF)."""
raise NotImplementedError
@classmethod
def _log_likelihood(cls, x, xmin, *args, **kwargs):
"""Pointwise log-likelihood function."""
return np.log(cls._pdf(x, xmin, *args, **kwargs))
@classmethod
def _mle_objective(cls, params, xmin, series, **kwargs):
"""Objective function for maximum likelihood estimation.."""
log_likelihood = cls._log_likelihood(series, xmin, *params, **kwargs)
return -log_likelihood.sum()
@classmethod
def _pdf(cls, x, xmin, *args, **params):
"""Probability density function (pdf)."""
C = cls._normalization_constant(xmin, *args, **params)
f = cls._density_function(x, *args, **params)
return C * f
@classmethod
def _rvs(cls, random_state, N, xmin, *args, **kwargs):
sample = stats.uniform.rvs(0, 1, size=N, random_state=random_state)
return cls._quantile(sample, xmin, *args, **kwargs)
@classmethod
def _generate_synthetic_data(cls, prng, clean_data, result):
N = clean_data.count()
body = clean_data[clean_data < result.xmin]
body_prob = body.count() / N
successes = stats.binom.rvs(1, body_prob, size=N, random_state=prng)
n_body = np.sum(successes)
body_sample = prng.choice(body, n_body)
n_tail = N - n_body
tail_sample = cls._rvs(prng, n_tail, result.xmin, **result.params)
synthetic_data = pd.Series(np.hstack((body_sample, tail_sample)),
index=clean_data.index)
return synthetic_data
class Exponential(Distribution):
@classmethod
def fit(cls, data, xmin):
"""Fit Exponential distribution to data using maximum likelihood."""
cleaned_data = cls._clean_data(data)
tail_data = cleaned_data[cleaned_data >= xmin]
gamma_hat = 1 / np.mean(tail_data - xmin)
# create the FitResult object...
n_tail = tail_data.count()
# Asymptotic standard error of the exponential rate MLE is gamma / sqrt(n).
gamma_se = gamma_hat / n_tail**0.5
log_likelihood = cls._log_likelihood(tail_data.values, xmin, gamma_hat)
result_kwargs = {'params': {'gamma': gamma_hat}, 'n_tail': n_tail,
'standard_errors': {'gamma': gamma_se},
'log_likelihood': log_likelihood, 'D': None}
result = results.FitResult(**result_kwargs)
return result
@classmethod
def _cdf(cls, x, xmin, gamma):
"""Cumulative distribution function for Exponential distribution."""
return np.exp(gamma * xmin) * (np.exp(-gamma * xmin) - np.exp(-gamma * x))
@staticmethod
def _density_function(x, gamma):
"""Density function for the Exponential distribution."""
return np.exp(-gamma * x)
@staticmethod
def _normalization_constant(xmin, gamma):
"""Normalization constant for the Exponential distribution."""
return gamma * np.exp(gamma * xmin)
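# Combining the two pieces above, the tail density used for x >= xmin is
#   pdf(x) = C * f(x) = gamma * exp(gamma * xmin) * exp(-gamma * x)
#          = gamma * exp(-gamma * (x - xmin)),
# i.e. an exponential distribution shifted to start at xmin, and _cdf reduces
# to 1 - exp(-gamma * (x - xmin)).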
class LogNormal(Distribution):
@classmethod
def fit(cls, data, xmin, initial_guess, method, solver_options):
result = optimize.minimize(cls._mle_objective, initial_guess, (xmin, data),
method, **solver_options)
return result
@classmethod
def _cdf(cls, x, xmin, mu, sigma):
return integrate.quad(cls._pdf, xmin, x, args=(xmin, mu, sigma))
@staticmethod
def _density_function(x, mu, sigma):
return (1 / x) * np.exp(-((np.log(x) - mu)**2 / (2 * sigma**2)))
@staticmethod
def _normalization_constant(xmin, mu, sigma):
return (2 / (np.pi * sigma**2))**0.5 * (special.erfc((np.log(xmin) - mu) / (2**0.5 * sigma)))**-1
class Pareto(Distribution):
@classmethod
def fit(cls, data, xmin, quantile=0.95, discrete=False,
approx=False, method='bounded', solver_opts=None):
clean_data = cls._clean_data(data)
if xmin is None:
solver_opts = {} if solver_opts is None else solver_opts
idxs = clean_data < clean_data.quantile(quantile)
candidate_xmins = clean_data[idxs].unique()
xmin, D = cls._find_optimal_xmin(candidate_xmins, clean_data,
discrete, approx, method,
solver_opts)
else:
D = cls._compute_ks_distance(xmin, clean_data, discrete, approx)
alpha_hat, tail_data = cls._fit_maximum_likelihood(xmin, clean_data,
discrete, approx)
# create the FitResult object...
n_tail = tail_data.count()
alpha_se = (alpha_hat - 1) / n_tail**0.5
log_likelihood = cls._log_likelihood(tail_data.values, xmin, alpha_hat)
fit_result_kwargs = {'params': {'alpha': alpha_hat}, 'xmin': xmin,
'D': D, 'n_tail': n_tail,
'standard_errors': {'alpha': alpha_se},
'log_likelihood': log_likelihood}
result = results.FitResult(**fit_result_kwargs)
return result
@classmethod
def test_goodness_of_fit(cls, seed, result, data, xmin=None,
quantile=0.99, discrete=False, approx=False,
method='brute', solver_opts=None,
replications=1000):
prng = np.random.RandomState(seed)
ks_distances = np.empty(replications)
clean_data = cls._clean_data(data)
for i in range(replications):
tmp_data = cls._generate_synthetic_data(prng, clean_data, result)
tmp_result = cls.fit(tmp_data, xmin, quantile, discrete, approx,
method)
ks_distances[i] = tmp_result.D
# p-value: fraction of synthetic data sets whose KS distance exceeds the observed one.
pvalue = (ks_distances > result.D).mean()
return pvalue, ks_distances
@staticmethod
def _cdf(x, xmin, alpha):
"""Cumulative distribution function (CDF)."""
return 1 - (xmin / x)**(alpha - 1)
@staticmethod
def _density_function(x, alpha):
return x**-alpha
@classmethod
def _fit_maximum_likelihood(cls, xmin, clean_data, discrete, approx):
r"""
Fit a Pareto distribution to some data using maximum likelihood.
Notes
-----
For a given value of $x_{min}$, the maximum likelihood estimator for
the scaling exponent is
\begin{equation}\label{eq:plawMLE}
\hat{\alpha} = 1 + n\left[\sum_{i=1}^{n}\mathrm{ln}\ \left(\frac{x_{i}}{x_{min}}\right)\right]^{-1}.
\end{equation}
Equation \ref{eq:plawMLE} is equivalent to the \cite{hill1975simple}
estimator, and has been shown to be asymptotically normal
\cite{hall1982some} and consistent \cite{mason1982laws}. The standard
error of $\hat{\alpha}$ is
\begin{equation}\label{eq:seplawMLE}
\sigma = \frac{\hat{\alpha} - 1}{\sqrt{n}} + \mathcal{O}\left(n^{-1}\right)
\end{equation}
"""
if discrete:
alpha_hat, tail_data = cls._mle_discrete(xmin, clean_data, approx)
else:
alpha_hat, tail_data = cls._mle_continuous(xmin, clean_data)
return alpha_hat, tail_data
@staticmethod
def _mle_continuous(xmin, clean_data):
"""Maximum likelihood estimator of the scaling exponent."""
tail_data = clean_data[clean_data >= xmin]
n = tail_data.count()
alpha_hat = 1 + n * (np.log(tail_data / xmin).sum())**-1
return alpha_hat, tail_data
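# A small worked example of the estimator above: for tail_data = [2, 4, 8] and
# xmin = 2, sum(log(x / xmin)) = log(1) + log(2) + log(4) ≈ 2.079, so
# alpha_hat = 1 + 3 / 2.079 ≈ 2.44 (rounded for illustration).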
@staticmethod
def _mle_discrete(xmin, clean_data, approx):
"""Maximum likelihood estimator of the scaling exponent."""
tail_data = clean_data[clean_data >= xmin]
n = tail_data.count()
if approx:
alpha_hat = 1 + n * (np.log(tail_data / (xmin - 0.5)).sum())**-1
else:
raise NotImplementedError
return alpha_hat, tail_data
@staticmethod
def _normalization_constant(xmin, alpha):
return (alpha - 1) * xmin**(alpha - 1)
@staticmethod
def _quantile(q, xmin, alpha):
"""Inverse of the cumulative distibution function (CDF)."""
return xmin / (1 - q)**(1 / (alpha - 1))
@classmethod
def _brute_force_minimize(cls, xmins, clean_data, discrete, approx):
Ds = [cls._compute_ks_distance(xmin, clean_data, discrete, approx) for xmin in xmins]
idx = np.argmin(Ds)
return xmins[idx], Ds[idx]
@classmethod
def _compute_ks_distance(cls, xmin, clean_data, discrete, approx):
"""Compute the Kolmogorov-Smirnov (KS) distance."""
alpha_hat, tail_data = cls._fit_maximum_likelihood(xmin, clean_data,
discrete, approx)
ecdf = cls._ecdf(tail_data)
cdf = cls._cdf(ecdf.x[1:], xmin, alpha_hat)
D = np.max(np.abs(ecdf.y[1:] - cdf))
return D
@classmethod
def _find_optimal_xmin(cls, xmins, clean_data, discrete, approx, method,
solver_opts):
"""Find optimal xmin by minimizing Kolmogorov-Smirnov (KS) distance."""
if method == 'brute':
xmin, D = cls._brute_force_minimize(xmins, clean_data, discrete,
approx)
elif method == 'bounded':
result = optimize.fminbound(cls._compute_ks_distance,
xmins.min(),
xmins.max(),
args=(clean_data, discrete, approx),
full_output=True,
**solver_opts)
xmin, D, _, _ = result
else:
raise ValueError
return xmin, D
class StretchedExponential(Distribution):
@classmethod
def fit(cls, data, xmin):
"""
Fit Stretched Exponential distribution to data using maximum
likelihood.
"""
raise NotImplementedError
@classmethod
def _cdf(cls, x, xmin, beta, gamma):
raise NotImplementedError
@staticmethod
def _density_function(x, beta, gamma):
return x**(beta - 1) * np.exp(-gamma * x**beta)
@staticmethod
def _normalization_constant(xmin, beta, gamma):
return beta * gamma * np.exp(gamma * xmin**beta)
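# As with the other distributions, the implied tail density for x >= xmin is
#   pdf(x) = beta * gamma * x**(beta - 1) * exp(-gamma * (x**beta - xmin**beta)),
# which reduces to the shifted Exponential case when beta == 1.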
|
|
import os
from os import environ as env
from django.conf import global_settings
import dj_database_url
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
import sentry_sdk.utils
SENTRY_DSN = env.get("SENTRY_DSN", None)
if SENTRY_DSN is not None:
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
)
sentry_sdk.utils.MAX_STRING_LENGTH = 8192
DEBUG = env.get('DJANGO_DEBUG', 'true') == 'true'
PROJECT_ROOT = os.path.dirname(os.path.realpath(__name__))
DATA_PATH = os.path.join(PROJECT_ROOT, "data")
ADMINS = (
('Adi Eyal', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
# load from DATABASE_URL env var, or default to this
'default': dj_database_url.config(default='sqlite:///mpr.db')
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
'code4sa-mpr.herokuapp.com',
'mpr.code4sa.org',
'mpr.openup.org.za',
'localhost',
'127.0.0.1',
'medicineprices.org.za'
]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Africa/Johannesburg'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: '/var/www/example.com/media/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: 'http://example.com/media/', 'http://media.example.com/'
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' 'static/' subdirectories and in STATICFILES_DIRS.
# Example: '/var/www/example.com/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: 'http://example.com/static/', 'http://static.example.com/'
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like '/home/html/static' or 'C:/www/django/static'.
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'mpr', 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
if DEBUG:
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ub7dk%m=d*k=aip1rl)z&v8sj&fg2msc&=km0z3u#5ct9+_43w'
else:
SECRET_KEY = env.get('DJANGO_SECRET_KEY')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'mpr.context_processors.settings_context'
]
}
},
]
MIDDLEWARE = (
'mpr.middleware.CORSMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mpr.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mpr.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.sitemaps',
'pipeline',
'mpr',
'dataprocessing',
# 'behave_django',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE = {
'ENABLED': env.get("DJANGO_PIPELINE_ENABLED", True),
'JAVASCRIPT': {
'mprbase' : {
'source_filenames': (
'js/jquery-1.10.2.js',
'js/bootstrap.js',
'js/jquery.ba-hashchange.js',
'js/medloader.js',
),
'output_filename': 'js/mprbase.js',
},
},
'STYLESHEETS' : {
'mpr' : {
'source_filenames': (
'css/bootstrap.css',
#'css/bootstrap-theme.css',
'css/custom.css',
),
'output_filename': 'css/mpr.css',
}
},
'CSS_COMPRESSOR' : 'pipeline.compressors.yuglify.YuglifyCompressor',
'JS_COMPRESSOR' : 'pipeline.compressors.yuglify.YuglifyCompressor',
'DISABLE_WRAPPER' : True
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
'datefmt' : '%d/%b/%Y %H:%M:%S'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
'mpr': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'ERROR',
'propagate': True,
},
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'TIMEOUT' : 60*60*24*7
}
}
SEGMENT_IO_KEY = env.get('SEGMENT_IO_KEY')
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
PRICE_PARAMETERS = {
"VAT" : 1.15,
"prices" : [
(118.80, 0.46, 15.80),
(315.53, 0.33, 30.24),
(1104.40, 0.15, 86.11),
(float('inf'), 0.05, 198.36),
]
}
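# Illustrative reading of the tiers above, assuming each tuple is
# (upper price bound, percentage fee, fixed fee) and the dispensing fee is
# percentage * price + fixed, with VAT applied via the 1.15 multiplier:
# a medicine priced at R100 falls in the first tier, giving a fee of
# 0.46 * 100 + 15.80 = 61.80 before VAT.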
LATEST_GAZETTE = "https://search.opengazettes.org.za/text/37304?dq=single%20exit%20price&page=27"
from . import loganalytics
if DEBUG:
ANALYTICS = loganalytics.test_log_analytics
else:
ANALYTICS = loganalytics.log_analytics
|
|
import copy
import re
__all__ = ['Scanner', 'text_coords']
class Scanner(object):
"""
:class:`Scanner` is a near-direct port of Ruby's ``StringScanner``.
The aim is to provide for lexical scanning operations on strings::
>>> from strscan import Scanner
>>> s = Scanner("This is an example string")
>>> s.eos()
False
>>> s.scan(r'\w+')
'This'
>>> s.scan(r'\w+')
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\s+')
>>> s.scan(r'\w+')
'is'
>>> s.eos()
False
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\w+')
'an'
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\w+')
'example'
>>> s.scan(r'\s+')
' '
>>> s.scan(r'\w+')
'string'
>>> s.eos()
True
>>> s.scan(r'\s+')
>>> s.scan(r'\w+')
Its mechanism of operation is similar to :class:`StringIO`, only instead of
reading by passing a number of bytes, you read by passing a regex. A scan
pointer tracks the current position through the string, and all scanning or
searching happens on the rest of the string after this pointer.
:meth:`scan` is the simple case of reading some text and advancing the scan
pointer, but there are several other related methods which fulfil different
requirements.
All the methods on :class:`Scanner` which take regexes will accept either
regex strings or compiled pattern objects (as would be returned from
``re.compile()``).
"""
def __init__(self, string):
self.string = string
self.pos_history, self._pos = [0], 0
self.match_history, self._match = [None], None
def __getitem__(self, index):
"""Proxy for ``self.match.group(index)``."""
if self.match:
return self.match.group(index)
raise IndexError("No match on this scanner")
def _get_pos(self):
return self._pos
def _set_pos(self, pos):
self._pos = pos
self.pos_history.append(pos)
pos = property(_get_pos, _set_pos,
doc="The current position of the scan pointer.")
prev = property(lambda self: self.pos_history[-2],
doc="The last position of the scan pointer.")
def coords(self):
r"""
Return the current scanner position as `(lineno, columnno, line)`.
This method is useful for displaying the scanner position in a human-
readable way. For example, you could use it to provide friendlier
debugging information when writing parsers.
>>> s = Scanner("abcdef\nghijkl\nmnopqr\nstuvwx\nyz")
>>> s.coords()
(0, 0, 'abcdef')
>>> s.pos += 4
>>> s.coords()
(0, 4, 'abcdef')
>>> s.pos += 2
>>> s.coords()
(0, 6, 'abcdef')
>>> s.pos += 1
>>> s.coords()
(1, 0, 'ghijkl')
>>> s.pos += 4
>>> s.coords()
(1, 4, 'ghijkl')
>>> s.pos += 4
>>> s.coords()
(2, 1, 'mnopqr')
"""
return text_coords(self.string, self.pos)
def _get_match(self):
return self._match
def _set_match(self, match):
self._match = match
self.match_history.append(match)
match = property(_get_match, _set_match,
doc="The latest scan match.")
def beginning_of_line(self):
r"""
Return true if the scan pointer is at the beginning of a line.
>>> s = Scanner("test\ntest\n")
>>> s.beginning_of_line()
True
>>> s.skip(r'te')
2
>>> s.beginning_of_line()
False
>>> s.skip(r'st\n')
3
>>> s.beginning_of_line()
True
>>> s.terminate()
>>> s.beginning_of_line()
True
"""
if self.pos > len(self.string):
return None
elif self.pos == 0:
return True
return self.string[self.pos - 1] == '\n'
def terminate(self):
"""Set the scan pointer to the end of the string; clear match data."""
self.pos = len(self.string)
self.match = None
def eos(self):
"""
Return true if the scan pointer is at the end of the string.
>>> s = Scanner("abc")
>>> s.eos()
False
>>> s.terminate()
>>> s.eos()
True
"""
return len(self.string) == self.pos
def getch(self):
"""
Get a single character and advance the scan pointer.
>>> s = Scanner("abc")
>>> s.getch()
'a'
>>> s.getch()
'b'
>>> s.getch()
'c'
>>> s.pos
3
"""
self.pos += 1
return self.string[self.pos - 1:self.pos]
def peek(self, length):
"""
Get a number of characters without advancing the scan pointer.
>>> s = Scanner("test string")
>>> s.peek(7)
'test st'
>>> s.peek(7)
'test st'
"""
return self.string[self.pos:self.pos + length]
def rest(self):
"""
Get the rest of the string that hasn't been scanned yet.
>>> s = Scanner("test string")
>>> s.scan(r'test')
'test'
>>> s.rest
' string'
"""
return self.string[self.pos:]
rest = property(rest)
def matched(self):
"""
Get the whole of the current match.
This method returns whatever would have been returned by the latest
:meth:`scan()` call.
>>> s = Scanner("test string")
>>> s.scan(r'test')
'test'
>>> s.matched()
'test'
"""
return self.match.group(0)
def pre_match(self):
r"""
Get whatever comes before the current match.
>>> s = Scanner('test string')
>>> s.skip(r'test')
4
>>> s.scan(r'\s')
' '
>>> s.pre_match()
'test'
"""
return self.string[:self.match.start()]
def post_match(self):
r"""
Get whatever comes after the current match.
>>> s = Scanner('test string')
>>> s.skip(r'test')
4
>>> s.scan(r'\s')
' '
>>> s.post_match()
'string'
"""
return self.string[self.match.end():]
def unscan(self):
"""
Undo the last scan, resetting the position and match registers.
>>> s = Scanner('test string')
>>> s.pos
0
>>> s.skip(r'te')
2
>>> s.rest
'st string'
>>> s.unscan()
>>> s.pos
0
>>> s.rest
'test string'
"""
self.pos_history.pop()
self._pos = self.pos_history[-1]
self.match_history.pop()
self._match = self.match_history[-1]
def scan_full(self, regex, return_string=True, advance_pointer=True):
"""
Match from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched.
>>> s = Scanner("test string")
>>> s.scan_full(r' ')
>>> s.scan_full(r'test ')
'test '
>>> s.pos
5
>>> s.scan_full(r'stri', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.scan_full(r'stri', return_string=False, advance_pointer=False)
4
>>> s.pos
5
"""
regex = get_regex(regex)
self.match = regex.match(self.string, self.pos)
if not self.match:
return
if advance_pointer:
self.pos = self.match.end()
if return_string:
return self.match.group(0)
return len(self.match.group(0))
def search_full(self, regex, return_string=True, advance_pointer=True):
"""
Search from the current position.
If `return_string` is false and a match is found, returns the number of
characters matched (from the current position *up to* the end of the
match).
>>> s = Scanner("test string")
>>> s.search_full(r' ')
'test '
>>> s.pos
5
>>> s.search_full(r'i', advance_pointer=False)
'stri'
>>> s.pos
5
>>> s.search_full(r'i', return_string=False, advance_pointer=False)
4
>>> s.pos
5
"""
regex = get_regex(regex)
self.match = regex.search(self.string, self.pos)
if not self.match:
return
start_pos = self.pos
if advance_pointer:
self.pos = self.match.end()
if return_string:
return self.string[start_pos:self.match.end()]
return (self.match.end() - start_pos)
def scan(self, regex):
"""
Match a pattern from the current position.
If a match is found, advances the scan pointer and returns the matched
string. Otherwise returns ``None``.
>>> s = Scanner("test string")
>>> s.pos
0
>>> s.scan(r'foo')
>>> s.scan(r'bar')
>>> s.pos
0
>>> s.scan(r'test ')
'test '
>>> s.pos
5
"""
return self.scan_full(regex, return_string=True, advance_pointer=True)
def scan_until(self, regex):
"""
Search for a pattern from the current position.
If a match is found, advances the scan pointer and returns the matched
string, from the current position *up to* the end of the match.
Otherwise returns ``None``.
>>> s = Scanner("test string")
>>> s.pos
0
>>> s.scan_until(r'foo')
>>> s.scan_until(r'bar')
>>> s.pos
0
>>> s.scan_until(r' ')
'test '
>>> s.pos
5
"""
return self.search_full(regex, return_string=True, advance_pointer=True)
def scan_upto(self, regex):
"""
Scan up to, but not including, the given regex.
>>> s = Scanner("test string")
>>> s.scan('t')
't'
>>> s.scan_upto(r' ')
'est'
>>> s.pos
4
>>> s.pos_history
[0, 1, 4]
"""
pos = self.pos
if self.scan_until(regex) is not None:
self.pos -= len(self.matched())
# Remove the intermediate position history entry.
self.pos_history.pop(-2)
return self.pre_match()[pos:]
def skip(self, regex):
"""
Like :meth:`scan`, but return the number of characters matched.
>>> s = Scanner("test string")
>>> s.skip('test ')
5
"""
return self.scan_full(regex, return_string=False, advance_pointer=True)
def skip_until(self, regex):
"""
Like :meth:`scan_until`, but return the number of characters matched.
>>> s = Scanner("test string")
>>> s.skip_until(' ')
5
"""
return self.search_full(regex, return_string=False, advance_pointer=True)
def check(self, regex):
"""
See what :meth:`scan` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.check('test ')
'test '
>>> s.pos
0
"""
return self.scan_full(regex, return_string=True, advance_pointer=False)
def check_until(self, regex):
"""
See what :meth:`scan_until` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.check_until(' ')
'test '
>>> s.pos
0
"""
return self.search_full(regex, return_string=True, advance_pointer=False)
def exists(self, regex):
"""
See what :meth:`skip_until` would return without advancing the pointer.
>>> s = Scanner("test string")
>>> s.exists(' ')
5
>>> s.pos
0
Returns the number of characters matched if it does exist, or ``None``
otherwise.
"""
return self.search_full(regex, return_string=False, advance_pointer=False)
def text_coords(string, position):
r"""
Transform a simple index into a human-readable position in a string.
This function accepts a string and an index, and will return a triple of
`(lineno, columnno, line)` representing the position through the text. It's
useful for displaying a string index in a human-readable way::
>>> s = "abcdef\nghijkl\nmnopqr\nstuvwx\nyz"
>>> text_coords(s, 0)
(0, 0, 'abcdef')
>>> text_coords(s, 4)
(0, 4, 'abcdef')
>>> text_coords(s, 6)
(0, 6, 'abcdef')
>>> text_coords(s, 7)
(1, 0, 'ghijkl')
>>> text_coords(s, 11)
(1, 4, 'ghijkl')
>>> text_coords(s, 15)
(2, 1, 'mnopqr')
"""
line_start = string.rfind('\n', 0, position) + 1
line_end = string.find('\n', position)
lineno = string.count('\n', 0, position)
columnno = position - line_start
line = string[line_start:line_end]
return (lineno, columnno, line)
def get_regex(regex):
"""
Ensure we have a compiled regular expression object.
>>> import re
>>> get_regex('string') # doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
>>> pattern = re.compile(r'string')
>>> get_regex(pattern) is pattern
True
>>> get_regex(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Invalid regex type: 3
"""
if isinstance(regex, basestring):
return re.compile(regex)
elif not isinstance(regex, re._pattern_type):
raise TypeError("Invalid regex type: %r" % (regex,))
return regex
def _get_tests():
"""Enables ``python setup.py test``."""
import doctest
return doctest.DocTestSuite()
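# A small usage sketch (illustrative, not part of the public API): tokenise a
# string by repeatedly scanning word characters and stepping over anything
# else, using only the Scanner methods documented above.
def _example_tokenise(text):
    """Return the list of word tokens in `text` using Scanner."""
    scanner = Scanner(text)
    tokens = []
    while not scanner.eos():
        word = scanner.scan(r'\w+')
        if word is not None:
            tokens.append(word)
        else:
            # No word at the current position: step over one character
            # (whitespace or punctuation) and try again.
            scanner.getch()
    return tokens
# _example_tokenise("This is an example string")
# -> ['This', 'is', 'an', 'example', 'string']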
|
|
"""Python class for nix derivations."""
import ast
import os
import json
import sys
import datadiff
import yaml
import rtyaml
class Derivation(object):
"""A Python representation of a derivation."""
# Cache of parsed derivations, to avoid duplicate parsing.
CACHE = {}
def __init__(self, path, raw, outputs, input_derivations,
input_files, system, builder, builder_args, environment):
"""Initializer.
:param path: The path to this derivation file.
:type path: ``str``
:param raw: Raw nix derivation.
:type raw: ``str``
:param outputs: The outputs this derivation will produce. Keys are
output names, and values are EITHER nix store paths, OR
nix store paths plus some output hash information.
:type outputs: ``dict`` of ``str`` -> ``str``
:param input_derivations: Derivations required to build the
expression. Keys are derivation paths, and values are lists of
output names that this derivation will use.
:type input_derivations: ``dict`` of ``str`` -> ``list`` of ``str``
:param input_files: Store paths that aren't derivations, which
are needed to build the expression.
:type input_files: ``set`` of ``str``
:param system: Architecture and OS the derivation is built on.
:type system: ``str``
:param builder: Store path of the derivation's builder executable.
:type builder: ``str``
:param builder_args: Command-line arguments for the builder.
:type builder_args: ``list`` of ``str``
:param environment: Environment variables to set for the builder.
:type environment: ``dict`` of ``str`` to ``str``
"""
self.outputs = outputs
self.input_derivations = input_derivations
self.input_files = input_files
self.system = system
self.builder = builder
self.builder_args = builder_args
self.environment = environment
# Hidden since they're not considered part of a nix derivation,
# but accessible via property.
self._raw = raw
self._path = path
# Built lazily.
self._input_paths = None
self._input_derivation_paths = None
self._output_mapping = None
self._as_dict = None
@property
def path(self):
"""Path to the derivation file."""
return self._path
@property
def raw(self):
"""The raw derivation string."""
return self._raw
@property
def default_output(self):
"""Name of the output that is built by default (usually 'out')."""
if len(self.outputs) == 1:
return list(self.outputs.keys())[0]
elif "outputs" in self.environment:
return self.environment["outputs"].split()[0]
elif "out" in self.outputs:
return "out"
else:
raise ValueError("Can't determine default output of derivation {}."
.format(self))
@property
def output_mapping(self):
"""A dictionary mapping output names to their paths."""
if self._output_mapping is None:
result = {}
for name, _path in self.outputs.items():
if isinstance(_path, str):
result[name] = _path
else:
# The path is actually a tuple combining the path with
# some hash data. Just take the path part.
result[name] = _path[0]
self._output_mapping = result
return self._output_mapping
@property
def name(self):
"""Get the name of the derivation by reading its environment.
`name` is a required attribute of derivations, so this should be safe.
"""
return self.environment["name"]
@property
def input_derivation_paths(self):
"""Set of all store paths needed to build this derivation,
that are themselves the result of derivations.
:return: A set of paths.
:rtype: ``set`` of ``str``
"""
if self._input_derivation_paths is None:
paths = set()
for deriv_path, outputs in self.input_derivations.items():
input_deriv = Derivation.parse_derivation_file(deriv_path)
for output in outputs:
paths.add(input_deriv.output_mapping[output])
self._input_derivation_paths = paths
return self._input_derivation_paths
@property
def input_paths(self):
"""Set of all store paths needed to build the derivation.
:return: A set of paths.
:rtype: ``set`` of ``str``
"""
if self._input_paths is None:
paths = set(self.input_files) | self.input_derivation_paths
self._input_paths = paths
return self._input_paths
@property
def output_names(self):
"""Return the names of outputs that this derivation produces.
:return: A set of output names.
:rtype: ``set`` of ``str``
"""
return set(self.outputs.keys())
@property
def as_dict(self):
"""Convert to a JSON-compatible dictionary."""
if self._as_dict is None:
items = vars(self).items()
res = {k: v for k, v in items if not k.startswith("_")}
for key, val in res.items():
if isinstance(val, set):
res[key] = list(sorted(val))
elif isinstance(val, tuple):
res[key] = list(val)
self._as_dict = res
return self._as_dict
def link_path(self, output_name):
"""Return the default path for a symlink created to this output."""
path = os.path.join(os.getcwd(), self.name)
if output_name != "out":
path += "-" + output_name
return path
def output_path(self, output_name):
"""Get the path to an output with the given name."""
if output_name not in self.output_mapping:
raise ValueError("No output named {}".format(output_name))
path = self.output_mapping[output_name]
if isinstance(path, tuple):
path = path[0]
return path
def output_paths(self, output_names):
"""Get paths of multiple outputs."""
return set(self.output_path(o) for o in output_names)
def __eq__(self, other):
"""Test if one derivation is equal to another."""
if isinstance(other, str):
other = Derivation.parse_derivation_file(other)
return self.as_dict == other.as_dict
def __hash__(self):
"""Use the derivation's path as a hash."""
return hash(self.path)
def __repr__(self):
return "Derivation({})".format(repr(self.path))
def diff(self, other):
"""Get a naive diff between two derivations, just comparing
their dictionary representation."""
        # Copy so the key replacement below doesn't mutate the derivations.
        selfdict, otherdict = dict(vars(self)), dict(vars(other))
# Convert outputs to a format that doesn't include the output
# file path, since we know this will be different if the two
# derivations are different.
selfdict["outputs"] = list(sorted(selfdict["outputs"].keys()))
otherdict["outputs"] = list(sorted(otherdict["outputs"].keys()))
return datadiff.diff(selfdict, otherdict)
def display(self, attribute=None, env_vars=None, output=None,
format=None, pretty=False):
"""Return a string representation in the given format.
:param attribute: If given, only show that attribute.
:type attribute: ``str`` or ``NoneType``
:param env_vars: If given, only show these environment variables.
:type env_vars: (``list`` of ``str``) or ``NoneType``
:param output: If given, show the output path of that output.
:type output: ``str``
:param format: The output format. Valid options are 'string',
'json' and 'yaml'. 'string' is limited in that it can
only show strings and lists of strings. If
unspecified, printing a dict will use JSON
format, and other things will use 'string'.
:type format: ``str``
:param pretty: Pretty-print.
:type pretty: ``bool``
:rtype: ``str``
"""
if attribute is None and env_vars is None and output is None:
to_print = self.raw if format == "string" else self.as_dict
elif attribute is not None:
to_print = getattr(self, attribute)
if isinstance(to_print, set):
to_print = list(sorted(to_print))
elif output is not None:
to_print = self.output_mapping[output]
elif env_vars is not None:
if len(env_vars) == 1:
to_print = self.environment[env_vars[0]]
else:
to_print = {var: self.environment[var] for var in env_vars}
else:
raise ValueError("I don't know what to print...")
# Now that we know what we want to print, decide the default
# format (unless it's given explicitly)
if format is None:
if isinstance(to_print, dict):
format = "json"
else:
format = "string"
if format == "string":
if isinstance(to_print, str):
return to_print
elif isinstance(to_print, list) and \
all(isinstance(x, str) for x in to_print):
return "\n".join(to_print)
else:
raise TypeError("Can't convert {} to a string (try --json "
"or --yaml).".format(type(to_print)))
elif format == "json":
if pretty is True:
return json.dumps(to_print, indent=2, sort_keys=True)
else:
return json.dumps(to_print, sort_keys=True)
elif format == "yaml":
if pretty is True:
return rtyaml.dump(to_print)
else:
return yaml.dump(to_print)
else:
raise ValueError("Invalid format: {}".format(format))
@staticmethod
def parse_derivation(derivation_string, derivation_path):
"""Parse a derivation string into a Derivation.
:param derivation_string: A string representation of a
derivation, as returned by a call to `nix-instantiate`.
:type derivation_string: ``str``
:param derivation_path: Path to the derivation file.
:type derivation_path: ``str``
:return: The parsed Derivation object.
:rtype: :py:class:`Derivation`
"""
if derivation_string.startswith("Derive(["):
# Then trim off the initial 'Derive(' and final ')'
derivation_string = derivation_string[7:-1]
# Parse the string as a python literal; this is a safe
# operation because the derivation will not contain any
# function calls, or anything which isn't a valid python literal.
derivation_list = ast.literal_eval(derivation_string)
        output_list = derivation_list[0]
outputs = {name: path if hashtype == "" else (path, hashtype, hash_)
for name, path, hashtype, hash_ in output_list}
input_derivations = dict(derivation_list[1])
input_files = set(derivation_list[2])
system = derivation_list[3]
builder = derivation_list[4]
builder_args = derivation_list[5]
environment = dict(derivation_list[6])
return Derivation(path=derivation_path,
raw=derivation_string,
outputs=outputs,
input_derivations=input_derivations,
input_files=input_files,
system=system,
builder=builder,
builder_args=builder_args,
environment=environment)
@staticmethod
def parse_derivation_file(derivation_path):
"""Parse a derivation from a file path.
:param derivation_path: Path to a file containing a string
representation of a derivation.
:type derivation_path: ``str``
:return: The parsed Derivation object.
:rtype: :py:class:`Derivation`
"""
if not os.path.isabs(derivation_path) and "NIX_STORE" in os.environ:
derivation_path = os.path.join(os.environ["NIX_STORE"],
derivation_path)
if derivation_path in Derivation.CACHE:
return Derivation.CACHE[derivation_path]
with open(derivation_path, "rb") as f:
source = f.read().decode("utf-8")
try:
deriv = Derivation.parse_derivation(source, derivation_path)
Derivation.CACHE[derivation_path] = deriv
return deriv
except Exception as e:
raise ValueError("Couldn't parse derivation at path {}: {}"
.format(derivation_path, repr(e)))
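# Illustrative command-line entry point (an assumption, not part of the
# original module): parse a derivation file named on the command line and
# print it as pretty JSON using the display() method defined above.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        _derivation = Derivation.parse_derivation_file(sys.argv[1])
        print(_derivation.display(format="json", pretty=True))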
|
|
# Copyright (c) 2009-2010 Google, Inc.
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""checker for use of Python logging
"""
import astroid
from pylint import checkers
from pylint import interfaces
from pylint.checkers import utils
from pylint.checkers.utils import check_messages
import six
MSGS = {
'W1201': ('Specify string format arguments as logging function parameters',
'logging-not-lazy',
'Used when a logging statement has a call form of '
'"logging.<logging method>(format_string % (format_args...))". '
'Such calls should leave string interpolation to the logging '
'method itself and be written '
'"logging.<logging method>(format_string, format_args...)" '
'so that the program may avoid incurring the cost of the '
'interpolation in those cases in which no message will be '
'logged. For more, see '
'http://www.python.org/dev/peps/pep-0282/.'),
'W1202': ('Use % formatting in logging functions but pass the % '
'parameters as arguments',
'logging-format-interpolation',
'Used when a logging statement has a call form of '
'"logging.<logging method>(format_string.format(format_args...))"'
'. Such calls should use % formatting instead, but leave '
'interpolation to the logging function by passing the parameters '
'as arguments.'),
'E1200': ('Unsupported logging format character %r (%#02x) at index %d',
'logging-unsupported-format',
              'Used when an unsupported format character is used in a logging '
              'statement format string.'),
    'E1201': ('Logging format string ends in middle of conversion specifier',
              'logging-format-truncated',
              'Used when a logging statement format string terminates before '
              'the end of a conversion specifier.'),
    'E1205': ('Too many arguments for logging format string',
              'logging-too-many-args',
              'Used when a logging format string is given too many arguments.'),
    'E1206': ('Not enough arguments for logging format string',
              'logging-too-few-args',
              'Used when a logging format string is given too few arguments.'),
}
CHECKED_CONVENIENCE_FUNCTIONS = set([
'critical', 'debug', 'error', 'exception', 'fatal', 'info', 'warn',
'warning'])
def is_method_call(callfunc_node, types=(), methods=()):
"""Determines if a CallFunc node represents a method call.
Args:
callfunc_node: The CallFunc AST node to check.
types: Optional sequence of caller type names to restrict check.
methods: Optional sequence of method names to restrict check.
Returns:
True, if the node represents a method call for the given type and
method names, False otherwise.
"""
if not isinstance(callfunc_node, astroid.CallFunc):
return False
func = utils.safe_infer(callfunc_node.func)
return (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and (func.bound.name in types if types else True)
and (func.name in methods if methods else True))
class LoggingChecker(checkers.BaseChecker):
"""Checks use of the logging module."""
__implements__ = interfaces.IAstroidChecker
name = 'logging'
msgs = MSGS
options = (('logging-modules',
{'default': ('logging',),
'type': 'csv',
'metavar': '<comma separated list>',
'help': 'Logging modules to check that the string format '
'arguments are in logging function parameter format'}
),
)
def visit_module(self, node): # pylint: disable=unused-argument
"""Clears any state left in this checker from last module checked."""
# The code being checked can just as easily "import logging as foo",
# so it is necessary to process the imports and store in this field
# what name the logging module is actually given.
self._logging_names = set()
logging_mods = self.config.logging_modules
self._logging_modules = set(logging_mods)
self._from_imports = {}
for logging_mod in logging_mods:
parts = logging_mod.rsplit('.', 1)
if len(parts) > 1:
self._from_imports[parts[0]] = parts[1]
def visit_from(self, node):
"""Checks to see if a module uses a non-Python logging module."""
try:
logging_name = self._from_imports[node.modname]
for module, as_name in node.names:
if module == logging_name:
self._logging_names.add(as_name or module)
except KeyError:
pass
def visit_import(self, node):
"""Checks to see if this module uses Python's built-in logging."""
for module, as_name in node.names:
if module in self._logging_modules:
self._logging_names.add(as_name or module)
@check_messages(*(MSGS.keys()))
def visit_callfunc(self, node):
"""Checks calls to logging methods."""
def is_logging_name():
return (isinstance(node.func, astroid.Getattr) and
isinstance(node.func.expr, astroid.Name) and
node.func.expr.name in self._logging_names)
def is_logger_class():
try:
for inferred in node.func.infer():
if isinstance(inferred, astroid.BoundMethod):
parent = inferred._proxied.parent
if (isinstance(parent, astroid.Class) and
(parent.qname() == 'logging.Logger' or
any(ancestor.qname() == 'logging.Logger'
for ancestor in parent.ancestors()))):
return True, inferred._proxied.name
except astroid.exceptions.InferenceError:
pass
return False, None
if is_logging_name():
name = node.func.attrname
else:
result, name = is_logger_class()
if not result:
return
self._check_log_method(node, name)
def _check_log_method(self, node, name):
"""Checks calls to logging.log(level, format, *format_args)."""
if name == 'log':
if node.starargs or node.kwargs or len(node.args) < 2:
# Either a malformed call, star args, or double-star args. Beyond
# the scope of this checker.
return
format_pos = 1
elif name in CHECKED_CONVENIENCE_FUNCTIONS:
if node.starargs or node.kwargs or not node.args:
# Either no args, star args, or double-star args. Beyond the
# scope of this checker.
return
format_pos = 0
else:
return
if isinstance(node.args[format_pos], astroid.BinOp) and node.args[format_pos].op == '%':
self.add_message('logging-not-lazy', node=node)
elif isinstance(node.args[format_pos], astroid.CallFunc):
self._check_call_func(node.args[format_pos])
elif isinstance(node.args[format_pos], astroid.Const):
self._check_format_string(node, format_pos)
def _check_call_func(self, callfunc_node):
"""Checks that function call is not format_string.format().
Args:
callfunc_node: CallFunc AST node to be checked.
"""
if is_method_call(callfunc_node, ('str', 'unicode'), ('format',)):
self.add_message('logging-format-interpolation', node=callfunc_node)
def _check_format_string(self, node, format_arg):
"""Checks that format string tokens match the supplied arguments.
Args:
node: AST node to be checked.
format_arg: Index of the format string in the node arguments.
"""
num_args = _count_supplied_tokens(node.args[format_arg + 1:])
if not num_args:
# If no args were supplied, then all format strings are valid -
# don't check any further.
return
format_string = node.args[format_arg].value
if not isinstance(format_string, six.string_types):
# If the log format is constant non-string (e.g. logging.debug(5)),
# ensure there are no arguments.
required_num_args = 0
else:
try:
keyword_args, required_num_args = \
utils.parse_format_string(format_string)
if keyword_args:
# Keyword checking on logging strings is complicated by
# special keywords - out of scope.
return
except utils.UnsupportedFormatCharacter as ex:
char = format_string[ex.index]
self.add_message('logging-unsupported-format', node=node,
args=(char, ord(char), ex.index))
return
except utils.IncompleteFormatString:
self.add_message('logging-format-truncated', node=node)
return
if num_args > required_num_args:
self.add_message('logging-too-many-args', node=node)
elif num_args < required_num_args:
self.add_message('logging-too-few-args', node=node)
def _count_supplied_tokens(args):
"""Counts the number of tokens in an args list.
The Python log functions allow for special keyword arguments: func,
exc_info and extra. To handle these cases correctly, we only count
arguments that aren't keywords.
Args:
args: List of AST nodes that are arguments for a log format string.
Returns:
Number of AST nodes that aren't keywords.
"""
return sum(1 for arg in args if not isinstance(arg, astroid.Keyword))
def register(linter):
"""Required method to auto-register this checker."""
linter.register_checker(LoggingChecker(linter))
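# Illustrative examples of the calls this checker flags (kept as comments so
# they are neither executed nor linted as part of this module; they assume a
# plain ``import logging``):
#
#   logging.warn("%s wrote %d bytes" % (user, n))    # W1201 logging-not-lazy
#   logging.info("{0} logged in".format(user))       # W1202 logging-format-interpolation
#   logging.error("%y is invalid", value)            # E1200 logging-unsupported-format
#   logging.debug("truncated: %", value)             # E1201 logging-format-truncated
#   logging.info("%s", first, second)                # E1205 logging-too-many-args
#   logging.info("%s %s", first)                     # E1206 logging-too-few-args
#
# The preferred form leaves interpolation to the logging call itself:
#   logging.warn("%s wrote %d bytes", user, n)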
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_benchmark import ChromeProxyBenchmark
from integration_tests import chrome_proxy_measurements as measurements
from integration_tests import chrome_proxy_pagesets as pagesets
from telemetry import benchmark
DESKTOP_PLATFORMS = ['mac', 'linux', 'win', 'chromeos']
WEBVIEW_PLATFORMS = ['android-webview', 'android-webview-shell']
class ChromeProxyClientVersion(ChromeProxyBenchmark):
tag = 'client_version'
test = measurements.ChromeProxyClientVersion
page_set = pagesets.SyntheticStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.client_version.synthetic'
class ChromeProxyClientType(ChromeProxyBenchmark):
tag = 'client_type'
test = measurements.ChromeProxyClientType
page_set = pagesets.ClientTypeStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.client_type.client_type'
@benchmark.Disabled(*WEBVIEW_PLATFORMS)
class ChromeProxyLoFi(ChromeProxyBenchmark):
tag = 'lo_fi'
test = measurements.ChromeProxyLoFi
page_set = pagesets.LoFiStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.lo_fi.lo_fi'
class ChromeProxyExpDirective(ChromeProxyBenchmark):
tag = 'exp_directive'
test = measurements.ChromeProxyExpDirective
page_set = pagesets.ExpDirectiveStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.exp_directive.exp_directive'
class ChromeProxyPassThrough(ChromeProxyBenchmark):
tag = 'pass_through'
test = measurements.ChromeProxyPassThrough
page_set = pagesets.PassThroughStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.pass_through.pass_through'
class ChromeProxyBypass(ChromeProxyBenchmark):
tag = 'bypass'
test = measurements.ChromeProxyBypass
page_set = pagesets.BypassStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.bypass.bypass'
class ChromeProxyCorsBypass(ChromeProxyBenchmark):
tag = 'bypass'
test = measurements.ChromeProxyCorsBypass
page_set = pagesets.CorsBypassStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.bypass.corsbypass'
class ChromeProxyBlockOnce(ChromeProxyBenchmark):
tag = 'block_once'
test = measurements.ChromeProxyBlockOnce
page_set = pagesets.BlockOnceStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.block_once.block_once'
@benchmark.Disabled(*(DESKTOP_PLATFORMS + WEBVIEW_PLATFORMS))
# Safebrowsing is enabled for Android and iOS.
class ChromeProxySafeBrowsingOn(ChromeProxyBenchmark):
tag = 'safebrowsing_on'
test = measurements.ChromeProxySafebrowsingOn
# Override CreateStorySet so that we can instantiate SafebrowsingStorySet
  # with a non-default param.
def CreateStorySet(self, options):
del options # unused
return pagesets.SafebrowsingStorySet(expect_timeout=True)
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.safebrowsing_on.safebrowsing'
@benchmark.Enabled(*(DESKTOP_PLATFORMS + WEBVIEW_PLATFORMS))
# Safebrowsing is switched off for Android Webview and all desktop platforms.
class ChromeProxySafeBrowsingOff(ChromeProxyBenchmark):
tag = 'safebrowsing_off'
test = measurements.ChromeProxySafebrowsingOff
page_set = pagesets.SafebrowsingStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.safebrowsing_off.safebrowsing'
class ChromeProxyHTTPFallbackProbeURL(ChromeProxyBenchmark):
tag = 'fallback_probe'
test = measurements.ChromeProxyHTTPFallbackProbeURL
page_set = pagesets.SyntheticStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.fallback_probe.synthetic'
class ChromeProxyHTTPFallbackViaHeader(ChromeProxyBenchmark):
tag = 'fallback_viaheader'
test = measurements.ChromeProxyHTTPFallbackViaHeader
page_set = pagesets.FallbackViaHeaderStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.fallback_viaheader.fallback_viaheader'
class ChromeProxyHTTPToDirectFallback(ChromeProxyBenchmark):
tag = 'http_to_direct_fallback'
test = measurements.ChromeProxyHTTPToDirectFallback
page_set = pagesets.HTTPToDirectFallbackStorySet
@classmethod
def Name(cls):
return ('chrome_proxy_benchmark.http_to_direct_fallback.'
'http_to_direct_fallback')
class ChromeProxyReenableAfterBypass(ChromeProxyBenchmark):
tag = 'reenable_after_bypass'
test = measurements.ChromeProxyReenableAfterBypass
page_set = pagesets.ReenableAfterBypassStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.reenable_after_bypass.reenable_after_bypass'
class ChromeProxySmoke(ChromeProxyBenchmark):
tag = 'smoke'
test = measurements.ChromeProxySmoke
page_set = pagesets.SmokeStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.smoke.smoke'
class ChromeProxyClientConfig(ChromeProxyBenchmark):
tag = 'client_config'
test = measurements.ChromeProxyClientConfig
page_set = pagesets.SyntheticStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.client_config.synthetic'
@benchmark.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoDirect(benchmark.Benchmark):
tag = 'video'
test = measurements.ChromeProxyVideoValidation
page_set = pagesets.VideoDirectStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.video.direct'
@benchmark.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoProxied(benchmark.Benchmark):
tag = 'video'
test = measurements.ChromeProxyVideoValidation
page_set = pagesets.VideoProxiedStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.video.proxied'
@benchmark.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoCompare(benchmark.Benchmark):
"""Comparison of direct and proxied video fetches.
This benchmark runs the ChromeProxyVideoDirect and ChromeProxyVideoProxied
benchmarks, then compares their results.
"""
tag = 'video'
test = measurements.ChromeProxyVideoValidation
page_set = pagesets.VideoCompareStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.video.compare'
@benchmark.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoFrames(benchmark.Benchmark):
"""Check for video frames similar to original video."""
tag = 'video'
test = measurements.ChromeProxyInstrumentedVideoValidation
page_set = pagesets.VideoFrameStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.video.frames'
@benchmark.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoAudio(benchmark.Benchmark):
"""Check that audio is similar to original video."""
tag = 'video'
test = measurements.ChromeProxyInstrumentedVideoValidation
page_set = pagesets.VideoAudioStorySet
@classmethod
def Name(cls):
return 'chrome_proxy_benchmark.video.audio'
|
|
# function to turn an integer into a printable modifier
def modifier_string(modifier):
    out = str(modifier)
if out[0] != "-":
out = "+" + out
return out
# function to generate an attack string for an attack
def attack_string(attack):
# start with the name and attack bonus
out = attack["name"] + " "
out += modifier_string(attack["attack bonus"]) + " ("
# add the damage roll
out += attack["roll"]
# add the type and modifier, if they exist
if len(attack["type"]) > 0:
out += " " + attack["type"]
if attack["modifier"] != 0:
out += modifier_string(attack["modifier"])
# add the critical
if attack["critical"] > 2:
out += "/×" + str(attack["critical"])
# add the effect, if any
if len(attack["effect"]) > 0:
out += " " + attack["effect"]
out += ")"
return out
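# For example (illustrative values), an attack dict such as
#   {'name': 'Longsword', 'attack bonus': 5, 'roll': '1d8', 'type': 'slashing',
#    'modifier': 3, 'critical': 2, 'effect': ''}
# renders as: Longsword +5 (1d8 slashing+3)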
# function to generate an attack line from a list of attacks
def attack_list_string(list):
attacks = ""
# traverse the list
for atk in list:
# indent if this is not the first
if attacks != "":
attacks += " or\n%(indent-weapon) "
# render the attack string
attacks += attack_string(atk)
return attacks
# list of ability scores in order
abilities = [
"Str",
"Dex",
"Con",
"Int",
"Wis",
"Cha",
]
# list of skills in order
skills = [
"Acrobatics",
"Appraise",
"Bluff",
"Climb",
"Craft",
"Diplomacy",
"Disable Device",
"Disguise",
"Escape Artist",
"Fly",
"Handle Animal",
"Heal",
"Intimidate",
"Knowledge (arcana)",
"Knowledge (dungeoneering)",
"Knowledge (engineering)",
"Knowledge (geography)",
"Knowledge (history)",
"Knowledge (local)",
"Knowledge (nature)",
"Knowledge (nobility)",
"Knowledge (planes)",
"Knowledge (religion)",
"Linguistics",
"Perception",
"Perform",
"Profession",
"Profession (sailor)",
"Ride",
"Sense Motive",
"Sleight of Hand",
"Spellcraft",
"Stealth",
"Survival",
"Swim",
"Use Magic Device",
]
class character:
    def __init__(self):
self.name = "Nobody"
self.challenge = 0
self.xp_value = 0
self.gender = "None"
self.race = "Human"
self.classes = {}
self.alignment = {'moral' : 0, 'ethical' : 0}
self.size = "Medium"
self.creature_type = {'primary': 'Humanoid', 'subtypes': []}
        self.initiative = 0
self.senses = {"Perception" : 0}
self.ac = {"alternates" : [{'type' : "touch", 'value': 10}, {'type': "flat-footed", 'value': 10}], "sources" : {}}
self.hp = 8
self.hit_dice = 1
self.hit_dice_list = ["1d8"]
self.hp_modifier = 0
self.saves = {"Fort" : 0, "Ref" : 0, "Will" : 0}
        self.save_modifiers = {}  # keyed by condition; saves_string() expects a mapping
self.defensive_abilities = {}
self.speed = 30
self.melee_attacks = [{
'name': 'Unarmed',
'attack bonus' : 0,
'roll' : '1d3',
'type' : 'nonlethal',
'modifier' : 0,
'critical' : 2,
'effect' : ''
},]
self.ranged_attacks = []
self.special_attacks = []
self.tactics = {}
self.abilities = {"Str": 10, "Dex": 10, "Con": 10, "Int": 10, "Wis": 10, "Cha": 10}
self.base_attack_bonus = 0
self.combat_maneuver_bonus = 0
self.combat_maneuver_defense = 10
self.feats = []
self.skills = {}
def output(self):
out = "h3. " + self.name + "\n"
out += "*CR* " + str(self.challenge) + "\n"
out += "*XP* " + str(self.xp_value) + "\n"
out += "_" + self.gender + '_ _' + self.race + "_\n"
out += self.class_string() + "\n"
out += self.alignment_string() + " " + self.size
out += " " + self.creature_type_string() + "\n"
out += "*Initiative* " + modifier_string(self.initiative) + "\n"
out += "*Senses* " + self.senses_string() + "\n"
out += "\n"
out += "h3. Defense\n"
out += "*AC* " + self.ac_string() + "\n"
out += "*HP* " + self.hp_string() + "\n"
out += self.saves_string() + "\n"
defensive_abilities = self.defensive_abilities_string()
if len(defensive_abilities) > 0:
out += defensive_abilities + "\n"
out += "\n"
out += "h3. Offense\n"
out += "*Speed* " + str(self.speed) + " ft.\n"
out += self.attacks_string() + "\n"
out += "\n"
tactics = self.tactics_string()
if (len(tactics) > 0):
out += "h3. Tactics\n"
out += tactics + "\n"
out += "\n"
out += "h3. Statistics\n"
out += self.abilities_string() + "\n"
out += self.combat_modifiers_string() + "\n"
out += self.feats_string() + "\n"
out += self.skills_string() + "\n"
        return out
# method to generate a string representing this character's classes
def class_string(self):
out = ""
for char_class in self.classes:
if len(out) > 0:
out += " / "
out += "*" + char_class['class'] + "* " + str(char_class['level'])
return out
def alignment_string(self):
out = "*"
if self.alignment['moral'] > 0:
out += "Lawful "
elif self.alignment['moral'] < 0:
out += "Chaotic "
else:
out += "Neutral "
if self.alignment['ethical'] > 0:
out += "Good"
elif self.alignment['ethical'] < 0:
out += "Evil"
else:
if out == "*Neutral ":
out = "*True Neutral"
else:
out += " Neutral"
out += "*"
return out
def creature_type_string(self):
out = "" + self.creature_type['primary']
if len(self.creature_type['subtypes']) > 0:
subs = "("
for subtype in self.creature_type['subtypes']:
if subs != "(":
subs += ", "
subs += subtype
out += subs + ")"
return out
# method to print out the Senses line
def senses_string(self):
out = ""
# iterate over the senses and print them
for sense in self.senses.keys():
if out != "":
out += ", "
out += sense + " " + modifier_string(self.senses[sense])
return out
def ac_string(self):
# build the alternate ac string
alts = ""
for ac in self.ac["alternates"]:
alts += " / " + ac["type"] + " " + str(ac["value"])
        # build the components string, and calculate the total ac
        total_ac = 10
        components = ""
        # Each entry in self.ac["sources"] is expected to be a dict with
        # "name" and "value" keys, so iterate over the stored values.
        for component in self.ac["sources"].values():
total_ac += component["value"]
if components != "":
components += ", "
else:
components += "("
components += modifier_string(component["value"]) + " " + component["name"]
if len(components) > 0:
components +=")"
out = str(total_ac) + alts
if len(components) > 0:
out += " " + components
return out
# method to print out the hit points line
def hp_string(self):
out = str(self.hp) + " "
if (self.hit_dice):
out += "("
out += str(self.hit_dice) + " HD"
dice_string = ""
for dice in self.hit_dice_list:
if (dice_string != ""):
dice_string += " +"
dice_string += " " + dice
out += dice_string + " " + modifier_string(self.hp_modifier)
out += ")"
return out
def saves_string(self):
out = ""
out += "*Fort* " + modifier_string(self.saves["Fort"]) + ", ";
out += "*Ref* " + modifier_string(self.saves["Ref"]) + ", ";
out += "*Will* " + modifier_string(self.saves["Will"]);
if len(self.save_modifiers) > 0:
out += " ("
specials = ""
for special in self.save_modifiers:
if specials != "":
specials += ", "
specials += modifier_string(self.save_modifiers[special]) + " vs. " + special
out += specials + ")"
return out
def defensive_abilities_string(self):
out = ""
if len(self.defensive_abilities) > 0:
abilities = ""
for ability in self.defensive_abilities:
if abilities != "":
abilities += ", "
abilities += ability
                val = self.defensive_abilities[ability]
if (val):
abilities += " " + modifier_string(val)
out += "*Defensive Abilities* " + abilities
return out
    # method to generate the attack lines
def attacks_string(self):
out = ""
# generate the melee lines
if len(self.melee_attacks) > 0:
out += "*Melee* " + attack_list_string(self.melee_attacks)
# generate the ranged lines
if len(self.ranged_attacks) > 0:
if out != "":
out += "\n"
out += "*Ranged* " + attack_list_string(self.ranged_attacks)
# generate the special lines
if len(self.special_attacks) > 0:
if out != "":
out += "\n"
attacks = ""
for atk in self.special_attacks:
if attacks != "":
attacks += ", "
attacks += atk["name"] + " " + modifier_string(atk["value"])
out += "*Special Attacks* " + attacks
return out
def tactics_string(self):
out = ""
for tactic in self.tactics:
            if len(out):
                out += "\n"
            out += "*" + tactic + "*: " + self.tactics[tactic]
        return out
# method to print out the ability scores of a character
def abilities_string(self):
out = ""
for ability in abilities:
if len(out) > 0:
out += ", "
out += "*" + ability + "* " + str(self.abilities[ability])
        return out
# method to print out combat modifiers
def combat_modifiers_string(self):
out = ""
out += "*BAB* " + modifier_string(self.base_attack_bonus) + ", "
out += "*CMB* " + modifier_string(self.combat_maneuver_bonus) + ", "
out += "*CMD* " + str(self.combat_maneuver_defense)
return out
# method to print out the list of feats
def feats_string(self):
out = "*Feats*:"
for feat in self.feats:
out += "\n * " + feat
return out
# method to print out the skills dict
def skills_string(self):
out = "*Skills*:"
for skill in skills:
if skill in self.skills:
out += "\n * " + skill + " " + modifier_string(self.skills[skill])
return out
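# Minimal usage sketch (illustrative): build a character with the defaults,
# fill in a few fields in the shapes the methods above expect, and print the
# rendered stat block.
if __name__ == "__main__":
    pc = character()
    pc.name = "Example Fighter"
    pc.classes = [{'class': 'Fighter', 'level': 1}]   # class_string() expects dicts
    pc.abilities["Str"] = 16
    pc.skills["Perception"] = 4
    print(pc.output())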
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import re
import socket
import sys
import types
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import processutils as utils
from nova.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Module-level aliases, kept for convenience; they are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('nova.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
def _serialize(data):
"""
Serialization wrapper
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
with excutils.save_and_reraise_exception():
LOG.error(_("JSON serialization failed."))
def _deserialize(data):
"""
Deserialization wrapper
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
Can be used as a Context (supports the 'with' statement).
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, envelope=False):
msg_id = msg_id or 0
if not (envelope or rpc_common._SEND_RPC_ENVELOPE):
self.outq.send(map(bytes,
(msg_id, topic, 'cast', _serialize(data))))
return
rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
zmq_msg = reduce(lambda x, y: x + y, rpc_envelope.items())
self.outq.send(map(bytes,
(msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
def close(self):
self.outq.close()
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# Our real method is curried into msg['args']
child_ctx = RpcContext.unmarshal(msg[0])
response = ConsumerBase.normalize_reply(
self._get_response(child_ctx, proxy, topic, msg[1]),
ctx.replies)
LOG.debug(_("Sending reply"))
_multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
}, _msg_id=msg_id)
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, proxy, ctx, data):
data.setdefault('version', None)
data.setdefault('args', {})
        # Methods whose names start with '-' are processed internally
        # (such names are not valid public method names).
method = data.get('method')
if not method:
LOG.error(_("RPC message did not include method."))
return
# Internal method
# uses internal context for safety.
if method == '-reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""
A consumer class implementing a
centralized casting broker (PULL-PUSH)
for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
zmq_type_out=None, in_bind=True, out_bind=True,
subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
topic = data[1]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
if topic.startswith('fanout~'):
sock_type = zmq.PUB
topic = topic.split('.', 1)[0]
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
else:
sock_type = zmq.PUSH
if topic not in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
# The topic is received over the network,
# don't trust this input.
if self.badchars.search(topic) is not None:
emsg = _("Topic contained dangerous characters.")
LOG.warn(emsg)
raise RPCException(emsg)
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
with excutils.save_and_reraise_exception():
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
super(ZmqProxy, self).consume_in_thread()
def unflatten_envelope(packenv):
"""Unflattens the RPC envelope.
Takes a list and returns a dictionary.
e.g. [1, 2, 3, 4] => {1: 2, 3: 4}
"""
i = iter(packenv)
h = {}
try:
while True:
k = i.next()
h[k] = i.next()
except StopIteration:
return h
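# Illustrative sketch (added for clarity, not part of the original driver):
# the flattened envelope is a list of alternating keys and values. The key
# and value strings below are assumed for illustration only.
def _unflatten_envelope_sketch():
    # ['oslo.version', '2.0', 'oslo.message', '{}'] becomes
    # {'oslo.version': '2.0', 'oslo.message': '{}'}
    return unflatten_envelope(['oslo.version', '2.0', 'oslo.message', '{}'])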
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
consumer for messages. Can also be
used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
proxy = self.proxies[sock]
if data[2] == 'cast': # Legacy protocol
packenv = data[3]
ctx, msg = _deserialize(packenv)
request = rpc_common.deserialize_msg(msg)
ctx = RpcContext.unmarshal(ctx)
elif data[2] == 'impl_zmq_v2':
packenv = data[4:]
msg = unflatten_envelope(packenv)
request = rpc_common.deserialize_msg(msg)
# Unmarshal only after verifying the message.
ctx = RpcContext.unmarshal(data[3])
else:
LOG.error(_("ZMQ Envelope version unsupported or unknown."))
return
self.pool.spawn_n(self.process, proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.topics = []
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Register with matchmaker.
_get_matchmaker().register(topic, CONF.rpc_zmq_host)
# Subscription scenarios
if fanout:
sock_type = zmq.SUB
subscribe = ('', fanout)[type(fanout) == str]
topic = 'fanout~' + topic.split('.', 1)[0]
else:
sock_type = zmq.PULL
subscribe = None
topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
if topic in self.topics:
LOG.info(_("Skipping topic registration. Already registered."))
return
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
self.topics.append(topic)
def close(self):
_get_matchmaker().stop_heartbeat()
for topic in self.topics:
_get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
self.reactor.close()
self.topics = []
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
_get_matchmaker().start_heartbeat()
self.reactor.consume_in_thread()
def _cast(addr, context, topic, msg, timeout=None, envelope=False,
_msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(_msg_id, topic, payload, envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, topic, msg, timeout=None,
envelope=False):
# 'timeout' is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'context': mcontext,
'topic': reply_topic,
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies.%s" %
(CONF.rpc_zmq_ipc_dir,
CONF.rpc_zmq_host),
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, topic, payload, envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
if msg[2] == 'cast': # Legacy version
raw_msg = _deserialize(msg[-1])[-1]
elif msg[2] == 'impl_zmq_v2':
rpc_envelope = unflatten_envelope(msg[4:])
raw_msg = rpc_common.deserialize_msg(rpc_envelope)
else:
raise rpc_common.UnsupportedRpcEnvelopeVersion(
_("Unsupported or unknown ZMQ envelope returned."))
responses = raw_msg['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
except (IndexError, KeyError):
raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None,
envelope=False, _msg_id=None):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
the message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if len(queues) == 0:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout(_("No match from matchmaker."))
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, msg, timeout, envelope,
_msg_id)
return
return method(_addr, context, _topic, msg, timeout,
envelope)
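# Illustrative sketch (added for clarity, not part of the original driver):
# the matchmaker returns (topic, host) pairs and _multi_send turns each host
# into a tcp:// address on the configured rpc_zmq_port, as shown above.
def _queues_to_addresses_sketch(queues):
    return ["tcp://%s:%s" % (host, CONF.rpc_zmq_port)
            for _topic, host in queues]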
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
# NOTE(ewindisch): fanout~ is used because it avoids splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, envelope):
"""
Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic = topic.replace('.', '-')
cast(conf, context, topic, msg, envelope=envelope)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
matchmaker = importutils.import_object(
CONF.rpc_zmq_matchmaker, *args, **kwargs)
return matchmaker
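# Illustrative usage sketch (added for clarity, not part of the original
# driver). The topic and the message payload below are assumed for
# illustration only; cast() expects no reply while call() waits for one.
def _rpc_usage_sketch(context):
    cast(CONF, context, 'compute.host1', {'method': 'ping', 'args': {}})
    return call(CONF, context, 'compute.host1',
                {'method': 'echo', 'args': {'value': 42}}, timeout=10)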
|
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class BaseCommentListMixin(object):
@webapi_test_template
def test_post_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_post_with_text_type('markdown')
@webapi_test_template
def test_post_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_post_with_text_type('plain')
def _test_post_with_text_type(self, text_type):
comment_text = '`This` is a **test**'
url, mimetype, data, objs = \
self.setup_basic_post_test(self.user, False, None, True)
data['text'] = comment_text
data['text_type'] = text_type
rsp = self.api_post(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class BaseCommentItemMixin(object):
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
if comment.rich_text:
self.assertEqual(item_rsp['rich_text'], 'markdown')
else:
self.assertEqual(item_rsp['rich_text'], 'plain')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='markdown',
expected_text=r'\#<\`This\` is a \*\*test\*\*>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='plain',
expected_text='#<`This` is a **test**>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='html',
expected_text='#<`This` is a **test**>')
@webapi_test_template
def test_put_with_text_type_markdown_and_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text specified
"""
self._test_put_with_text_type_and_text('markdown')
@webapi_test_template
def test_put_with_text_type_plain_and_text(self):
"""Testing the PUT <URL> API with text_type=plain and text specified"""
self._test_put_with_text_type_and_text('plain')
@webapi_test_template
def test_put_with_text_type_markdown_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text not specified escapes text
"""
self._test_put_with_text_type_and_not_text(
'markdown',
'`Test` **diff** comment',
r'\`Test\` \*\*diff\*\* comment')
@webapi_test_template
def test_put_with_text_type_plain_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=plain and text not specified
"""
self._test_put_with_text_type_and_not_text(
'plain',
r'\`Test\` \*\*diff\*\* comment',
'`Test` **diff** comment')
@webapi_test_template
def test_put_without_text_type_and_escaping_provided_fields(self):
"""Testing the PUT <URL> API
without changing text_type and with escaping provided fields
"""
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.rich_text = True
reply_comment.save()
if 'text_type' in data:
del data['text_type']
data.update({
'text': '`This` is **text**',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], 'markdown')
self.assertEqual(comment_rsp['text'], '\\`This\\` is \\*\\*text\\*\\*')
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
@webapi_test_template
def test_put_with_multiple_include_text_types(self):
"""Testing the PUT <URL> API with multiple include-text-types"""
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data.update({
'include_text_types': 'raw,plain,markdown,html',
'text': 'Foo',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
@webapi_test_template
def test_put_with_issue_verification_success(self):
"""Testing the PUT <URL> API with issue verification success"""
url, mimetype, data, comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
comment.require_verification = True
comment.save()
rsp = self.api_put(
url,
{'issue_status': 'resolved'},
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
@webapi_test_template
def test_put_with_issue_verification_permission_denied(self):
"""Testing the PUT <URL> API with issue verification permission denied
"""
user = User.objects.get(username='doc')
self.assertNotEqual(user, self.user)
url, mimetype, data, comment, objs = \
self.setup_basic_put_test(user, False, None, True)
comment.require_verification = True
comment.save()
rsp = self.api_put(
url,
{'issue_status': 'resolved'},
expected_status=self.not_owner_status_code)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], self.not_owner_error.code)
def _test_get_with_force_text_type(self, text, rich_text,
force_text_type, expected_text):
url, mimetype, comment = \
self.setup_basic_get_test(self.user, False, None)
comment.text = text
comment.rich_text = rich_text
comment.save()
rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], force_text_type)
self.assertEqual(comment_rsp['text'], expected_text)
self.assertNotIn('raw_text_fields', comment_rsp)
rsp = self.api_get('%s?force-text-type=%s&include-text-types=raw'
% (url, force_text_type),
expected_mimetype=mimetype)
comment_rsp = rsp[self.resource.item_result_key]
self.assertIn('raw_text_fields', comment_rsp)
self.assertEqual(comment_rsp['raw_text_fields']['text'], text)
def _test_put_with_text_type_and_text(self, text_type):
comment_text = '`Test` **diff** comment'
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data['text_type'] = text_type
data['text'] = comment_text
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
def _test_put_with_text_type_and_not_text(self, text_type, text,
expected_text):
self.assertIn(text_type, ('markdown', 'plain'))
rich_text = (text_type == 'markdown')
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.text = text
reply_comment.rich_text = not rich_text
reply_comment.save()
data['text_type'] = text_type
if 'text' in data:
del data['text']
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], expected_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class CommentListMixin(ExtraDataListMixin, BaseCommentListMixin):
pass
class CommentItemMixin(ExtraDataItemMixin, BaseCommentItemMixin):
pass
class CommentReplyListMixin(BaseCommentListMixin):
pass
class CommentReplyItemMixin(BaseCommentItemMixin):
pass
|
|
from __future__ import absolute_import
from random import randint
from datetime import datetime, timedelta
from .error import SpiderMisuseError
from ..base import copy_config
class BaseTask(object):
pass
class Task(BaseTask):
"""
Task for spider.
"""
def __init__(self, name='initial', url=None, grab=None, grab_config=None,
priority=None, priority_is_custom=True,
network_try_count=0, task_try_count=0,
disable_cache=False, refresh_cache=False,
valid_status=[], use_proxylist=True,
cache_timeout=None, delay=0,
raw=False, callback=None,
**kwargs):
"""
Create `Task` object.
If more than one of the url, grab and grab_config options is non-empty,
they are processed in the following order:
* grab overwrites grab_config
* grab_config overwrites url
Args:
:param name: name of the task. After a successful network operation the
task's result will be passed to the `task_<name>` method.
:param url: URL of the network document. Every task requires either the `url`
or the `grab` option to be specified.
:param grab: a configured `Grab` instance. You can use this option when the
`url` option alone is not enough. Do not forget to configure the `url` option
of the `Grab` instance, because in this case the `url` option of the `Task`
constructor will be overwritten with `grab.config['url']`.
:param priority: priority of the Task. Tasks with lower priority will be
processed earlier. By default each new task is assigned a random priority
from the (80, 100) range.
:param priority_is_custom: internal flag which tells whether the task priority
was assigned manually or generated by the spider according to its priority
generation rules.
:param network_try_count: you will probably not need to use it. It is used
internally to track how many times this task was restarted due to network
errors. The `Spider` instance has a `network_try_limit` option. When the
`network_try_count` attribute of the task exceeds the `network_try_limit`
attribute, processing of the task is abandoned.
:param task_try_count: the same as `network_try_count`, but it is increased
only when you use the `clone` method. You can also set it manually. It is
useful if you want to restart the task after it was cancelled due to multiple
network errors. As you might have guessed, there is a `task_try_limit` option
in the `Spider` instance. The try counts and try limits together guarantee
that you will not get an infinite loop of restarting some task.
:param disable_cache: if `True`, disable the cache subsystem. The document will
be fetched from the network and will not be saved to the cache.
:param refresh_cache: if `True`, the document will be fetched from the network
and saved to the cache.
:param valid_status: extra status codes which count as valid
:param use_proxylist: whether to use the proxy list which was configured
via the `setup_proxylist` method of the spider
:param cache_timeout: maximum age (in seconds) of cache record to be valid
:param delay: if specified, tells the spider to schedule the task and execute
it after `delay` seconds
:param raw: if `raw` is True then the network response is forwarded to the
corresponding handler without any check of the HTTP status code or network
error; if `raw` is False (the default) then a failed response is put back
into the task queue, or, if the tries limit is reached, the processing of
this request is finished.
:param callback: if you pass a function in the `callback` option then the
network response will be passed to this callback, the usual 'task_*' handler
will be ignored, and no error will be raised if such a 'task_*' handler does
not exist.
Any non-standard named arguments passed to the `Task` constructor will be saved
as attributes of the object. You can get their values later as attributes or
with the `get` method, which allows using a default value if the attribute
does not exist.
"""
if name == 'generator':
# The name "generator" is reserved: a `task_generator`
# handler cannot be created because this name is already
# used for the special method which generates new tasks
raise SpiderMisuseError('Task name could not be "generator"')
self.name = name
if url is None and grab is None and grab_config is None:
raise SpiderMisuseError('Either url, grab or grab_config argument of Task constructor should not be None')
if url is not None and grab is not None:
raise SpiderMisuseError('Options url and grab could not be used together')
if url is not None and grab_config is not None:
raise SpiderMisuseError('Options url and grab_config could not be used together')
if grab is not None and grab_config is not None:
raise SpiderMisuseError('Options grab and grab_config could not be used together')
if grab:
self.setup_grab_config(grab.dump_config())
elif grab_config:
self.setup_grab_config(grab_config)
else:
self.grab_config = None
self.url = url
self.process_delay_option(delay)
self.priority_is_custom = priority_is_custom
self.priority = priority
self.network_try_count = network_try_count
self.task_try_count = task_try_count
self.disable_cache = disable_cache
self.refresh_cache = refresh_cache
self.valid_status = valid_status
self.use_proxylist = use_proxylist
self.cache_timeout = cache_timeout
self.raw = raw
self.callback = callback
for key, value in kwargs.items():
setattr(self, key, value)
def get(self, key, default=None):
"""
Return the value of the attribute, or `default` if the
attribute does not exist.
"""
return getattr(self, key, default)
def process_delay_option(self, delay):
if delay:
self.schedule_time = datetime.now() + timedelta(seconds=delay)
self.original_delay = delay
else:
self.schedule_time = None
self.original_delay = None
def setup_grab_config(self, grab_config):
self.grab_config = copy_config(grab_config)
self.url = grab_config['url']
def clone(self, **kwargs):
"""
Clone Task instance.
Reset network_try_count, increase task_try_count.
"""
# First, create exact copy of the current Task object
attr_copy = self.__dict__.copy()
if attr_copy.get('grab_config') is not None:
del attr_copy['url']
task = Task(**attr_copy)
# Reset some task properties if they have not
# been set explicitly in kwargs
if not 'network_try_count' in kwargs:
task.network_try_count = 0
if not 'task_try_count' in kwargs:
task.task_try_count = self.task_try_count + 1
if not 'refresh_cache' in kwargs:
task.refresh_cache = False
if not 'disable_cache' in kwargs:
task.disable_cache = False
if kwargs.get('url') is not None and kwargs.get('grab') is not None:
raise SpiderMisuseError('Options url and grab could not be used together')
if kwargs.get('url') is not None and kwargs.get('grab_config') is not None:
raise SpiderMisuseError('Options url and grab_config could not be used together')
if kwargs.get('grab') is not None and kwargs.get('grab_config') is not None:
raise SpiderMisuseError('Options grab and grab_config could not be used together')
if kwargs.get('grab'):
task.setup_grab_config(kwargs['grab'].dump_config())
del kwargs['grab']
elif kwargs.get('grab_config'):
task.setup_grab_config(kwargs['grab_config'])
del kwargs['grab_config']
elif kwargs.get('url'):
task.url = kwargs['url']
if task.grab_config:
task.grab_config['url'] = kwargs['url']
del kwargs['url']
for key, value in kwargs.items():
setattr(task, key, value)
task.process_delay_option(task.get('delay', None))
return task
def __repr__(self):
return '<Task: %s>' % self.url
def __lt__(self, other):
if self.priority and other.priority:
return (self.priority < other.priority)
else:
return False
def __eq__(self, other):
return (self.priority == other.priority)
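# Illustrative usage sketch (added for clarity, not part of the original
# module). The URL and the extra `page` keyword below are assumed for
# illustration; `page` is stored as a plain attribute, as described in the
# constructor docstring above.
def _task_usage_sketch():
    task = Task(name='page', url='http://example.com/', page=1)
    retry = task.clone(url='http://example.com/?page=2')
    # clone() resets network_try_count and bumps task_try_count to 1.
    return task.get('page'), retry.task_try_count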
class NullTask(BaseTask):
def __init__(self, name='initial', sleep=0, priority=None,
priority_is_custom=True, network_try_count=0,
task_try_count=0):
self.name = name
self.sleep = sleep
self.priority = None
self.priority_is_custom = False
self.network_try_count = network_try_count
self.task_try_count = task_try_count
|
|
'''
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
import sys
import getopt
import time
import json
import glob
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
# Tkinter
try:
import tkinter # Python 3.x
except ImportError:
import Tkinter as tkinter
# Class that defines and manages the callbacks used in this app
class ThermoSimAppCallbackPool:
def __init__(self, srcTkRoot, srcReportedDataDisplayBox, srcDeviceShadowHandler, srcReportedDataVariable, srcDesiredDataVariable):
self._tkRootHandler = srcTkRoot
self._reportedDataDisplayBox = srcReportedDataDisplayBox
self._reportedDataVariableHandler = srcReportedDataVariable
self._desiredDataVariableHandler = srcDesiredDataVariable
self._deviceShadowHandler = srcDeviceShadowHandler
self._reportedTemperatureDataFromNetwork = "XX.X"
def buttonCallback(self, srcSetTemperatureInputBox, srcDesiredDataVariable):
desiredData = None
try:
desiredData = "{:.1f}".format((float)(srcSetTemperatureInputBox.get()))
if float(desiredData) >= 100.0:
print("Cannot set temperature higher than 100 F.")
elif float(desiredData) <= -100.0:
print("Cannot set temperature lower than -100 F.")
else:
JSONString = '{"state":{"desired":{"Temp":' + str(desiredData) + '}}}'
srcDesiredDataVariable.set(str(desiredData) + " F")
self._deviceShadowHandler.shadowUpdate(JSONString, None, 5)
except ValueError:
print("Setting desired temperature: Invalid temperature value!")
except Exception as e:
print(e.message)
def shadowGetCallback(self, payload, responseStatus, token):
print(payload)
print("---------------")
print(responseStatus)
print("\n\n")
if responseStatus == "accepted":
try:
JSONResponseDictionary = json.loads(payload)
self._reportedTemperatureDataFromNetwork = JSONResponseDictionary[u"state"][u"reported"][u"Temp"]
except:
print("Invalid JSON or missing attribute")
def sendShadowGetForReportedTemperature(self, event=None):
try:
self._deviceShadowHandler.shadowGet(self.shadowGetCallback, 5)
except Exception as e:
print(e.message)
self._tkRootHandler.after(500, self.sendShadowGetForReportedTemperature)
def updateReportedTemperatureDataVariable(self, event=None):
# Also update the color
currentDesiredData = self._desiredDataVariableHandler.get()[:4]
if currentDesiredData != "XX.X":
if self._reportedTemperatureDataFromNetwork > float(currentDesiredData):
self._reportedDataDisplayBox.config(fg="blue")
elif self._reportedTemperatureDataFromNetwork < float(currentDesiredData):
self._reportedDataDisplayBox.config(fg="red")
else:
self._reportedDataDisplayBox.config(fg="black")
self._reportedDataVariableHandler.set(str(self._reportedTemperatureDataFromNetwork) + " F")
self._tkRootHandler.after(500, self.updateReportedTemperatureDataVariable)
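# Illustrative sketch (added for clarity, not part of the original sample):
# builds the same shadow update document that buttonCallback assembles by
# string concatenation above, here via json.dumps.
def _build_desired_temperature_payload(temperature):
    return json.dumps({"state": {"desired": {"Temp": float(temperature)}}})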
# Class that generates the GUI and starts the application
class ThermoSimAppGUI:
_usage = """Usage:
Make sure that you put all your credentials under: ./certs/
with the following naming conventions:
Root CA file: *CA.crt
Certificate file (not required if using MQTT over WebSocket): *.pem.crt
Private key file (not required if using MQTT over WebSocket): *.pem.key
Use X.509 certificate based mutual authentication:
python ThermostatSimulatorApp -e <endpoint>
Use MQTT over WebSocket:
python ThermostatSimulatorApp -e <endpoint> -w
Type "python ThermostatSimulatorApp -h" for detailed command line options.
"""
_helpInfo = """Available command line options:
-e, --endpoint: Your custom AWS IoT custom endpoint
-w, --websocket: Use MQTT over websocket
-h, --help: Help information
"""
def __init__(self):
# Init data members
# Connection related
self._endpoint = ""
self._rootCAFilePathList = ""
self._certificateFilePathList = ""
self._privateKeyFilePathList = ""
self._useWebsocket = False
self._AWSIoTMQTTShadowClient = None
self._thermostatSimulatorShadowHandler = None
# GUI related
self._tkRootHandler = tkinter.Tk()
self._reportedDataVariable = None
self._reportedDataDisplayBox = None
self._desiredDataVariable = None
self._desiredDataDisplayBox = None
self._setTemperatureInputBox = None
self._setTemperatureButton = None
# Check command line inputs
if not self._checkInputs():
raise ValueError("Malformed/Missing command line inputs.")
# Create and configure AWSIoTMQTTShadowClient
self._AWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient("ThermostatSimulatorApp", useWebsocket=self._useWebsocket)
if self._useWebsocket:
self._AWSIoTMQTTShadowClient.configureEndpoint(self._endpoint, 443)
self._AWSIoTMQTTShadowClient.configureCredentials(self._rootCAFilePathList[0])
else:
self._AWSIoTMQTTShadowClient.configureEndpoint(self._endpoint, 8883)
self._AWSIoTMQTTShadowClient.configureCredentials(self._rootCAFilePathList[0], self._privateKeyFilePathList[0], self._certificateFilePathList[0])
self._AWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 128, 20)
self._AWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10)
self._AWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5)
# Set keepAlive interval to 5 seconds and connect
# Raise exception if there is an error in connecting to AWS IoT
self._AWSIoTMQTTShadowClient.connect(5)
self._thermostatSimulatorShadowHandler = self._AWSIoTMQTTShadowClient.createShadowHandlerWithName("room", True)
# Generate GUI
self._packModule()
# Validate command line inputs
# Return False if there are any malformed inputs
# Return True if all the necessary inputs have been discovered
def _checkInputs(self):
gotEoughInputs = True
# Check command line inputs
try:
opts, args = getopt.getopt(sys.argv[1:], "hwe:", ["endpoint=", "websocket", "help"])
if len(opts) == 0:
raise getopt.GetoptError("No input parameters")
for opt, arg in opts:
if opt in ("-e", "--endpoint"):
self._endpoint = arg
if opt in ("-w", "--websocket"):
self._useWebsocket = True
if opt in ("-h", "--help"):
print(self._helpInfo)
gotEoughInputs = False
except getopt.GetoptError:
print(self._usage)
gotEoughInputs = False
# Check credential files
if gotEoughInputs:
self._rootCAFilePathList = glob.glob("./certs/*CA.crt")
if self._useWebsocket:
gotEoughInputs = gotEoughInputs and len(self._rootCAFilePathList) != 0
if not gotEoughInputs:
print("Missing rootCA in ./certs/")
else:
self._certificateFilePathList = glob.glob("./certs/*.pem.crt")
self._privateKeyFilePathList = glob.glob("./certs/*.pem.key")
gotEoughInputs = gotEoughInputs and len(self._rootCAFilePathList) != 0 and len(self._certificateFilePathList) != 0 and len(self._privateKeyFilePathList) != 0
if not gotEoughInputs:
print("Missing rootCA, certificate or private key in ./certs/")
return gotEoughInputs
def _packModule(self):
self._tkRootHandler.title("ThermostatSimulatorApp")
self._tkRootHandler.geometry("500x250")
self._tkRootHandler.resizable(width=False, height=False)
# Pack all frames
baseFrame = tkinter.Frame(self._tkRootHandler)
temperatureFrame = tkinter.Frame(baseFrame)
temperatureFrame.pack(side="top")
controlPanelFrame = tkinter.Frame(baseFrame)
controlPanelFrame.pack(side="bottom")
baseFrame.pack()
# Pack all modules for temperature frame
self._reportedDataVariable = tkinter.StringVar()
self._reportedDataVariable.set("XX.X F")
reportedDataTag = tkinter.Label(temperatureFrame, text="Reported Temperature:", justify="left")
self._reportedDataDisplayBox = tkinter.Label(temperatureFrame, textvariable=self._reportedDataVariable, font=("Arial", 55), justify="left")
#
self._desiredDataVariable = tkinter.StringVar()
self._desiredDataVariable.set("XX.X F")
desiredDataTag = tkinter.Label(temperatureFrame, text="Desired Temperature:", justify="left")
self._desiredDataDisplayBox = tkinter.Label(temperatureFrame, textvariable=self._desiredDataVariable, font=("Arial", 55), justify="left")
#
reportedDataTag.pack()
self._reportedDataDisplayBox.pack()
desiredDataTag.pack()
self._desiredDataDisplayBox.pack()
# Create a callback pool
self._callbackPoolHandler = ThermoSimAppCallbackPool(self._tkRootHandler, self._reportedDataDisplayBox, self._thermostatSimulatorShadowHandler, self._reportedDataVariable, self._desiredDataVariable)
# Pack all modules for control panel frame
self._setTemperatureInputBox = tkinter.Entry(controlPanelFrame)
self._setTemperatureInputBox.pack(side="left")
self._setTemperatureButton = tkinter.Button(controlPanelFrame, text="SET", command=lambda: self._callbackPoolHandler.buttonCallback(self._setTemperatureInputBox, self._desiredDataVariable))
self._setTemperatureButton.pack()
def runApp(self):
# Start and run the app
self._tkRootHandler.after(500, self._callbackPoolHandler.sendShadowGetForReportedTemperature) # per 500ms
self._tkRootHandler.after(500, self._callbackPoolHandler.updateReportedTemperatureDataVariable) # per 500ms
self._tkRootHandler.mainloop()
# Main
if __name__ == '__main__':
# Start the app
try:
thisThermoSimAppGUI = ThermoSimAppGUI()
thisThermoSimAppGUI.runApp()
except ValueError:
print("Terminated.")
except KeyboardInterrupt:
print("Terminated.")
|
|
# -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
import datetime
import json
import os
import re
import sys
import urllib
import urlparse
import xbmc
from resources.lib.modules import control
from resources.lib.modules import cleantitle
class lib_tools:
@staticmethod
def create_folder(folder):
try:
folder = xbmc.makeLegalFilename(folder)
control.makeFile(folder)
try:
if not 'ftp://' in folder: raise Exception()
from ftplib import FTP
ftparg = re.compile('ftp://(.+?):(.+?)@(.+?):?(\d+)?/(.+/?)').findall(folder)
ftp = FTP(ftparg[0][2], ftparg[0][0], ftparg[0][1])
try:
ftp.cwd(ftparg[0][4])
except:
ftp.mkd(ftparg[0][4])
ftp.quit()
except:
pass
except:
pass
@staticmethod
def write_file(path, content):
try:
path = xbmc.makeLegalFilename(path)
if not isinstance(content, basestring):
content = str(content)
file = control.openFile(path, 'w')
file.write(str(content))
file.close()
except Exception as e:
pass
@staticmethod
def nfo_url(media_string, ids):
tvdb_url = 'http://thetvdb.com/?tab=series&id=%s'
tmdb_url = 'https://www.themoviedb.org/%s/%s'
imdb_url = 'http://www.imdb.com/title/%s/'
if 'tvdb' in ids:
return tvdb_url % (str(ids['tvdb']))
elif 'tmdb' in ids:
return tmdb_url % (media_string, str(ids['tmdb']))
elif 'imdb' in ids:
return imdb_url % (str(ids['imdb']))
else:
return ''
@staticmethod
def check_sources(title, year, imdb, tvdb=None, season=None, episode=None, tvshowtitle=None, premiered=None):
try:
from resources.lib.modules import sources
src = sources.sources().getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered)
return src and len(src) > 5
except:
return False
@staticmethod
def legal_filename(filename):
try:
filename = filename.strip()
filename = re.sub(r'(?!%s)[^\w\-_\.]', '.', filename)
filename = re.sub('\.+', '.', filename)
filename = re.sub(re.compile('(CON|PRN|AUX|NUL|COM\d|LPT\d)\.', re.I), '\\1_', filename)
xbmc.makeLegalFilename(filename)
return filename
except:
return filename
@staticmethod
def make_path(base_path, title, year='', season=''):
show_folder = re.sub(r'[^\w\-_\. ]', '_', title)
show_folder = '%s (%s)' % (show_folder, year) if year else show_folder
path = os.path.join(base_path, show_folder)
if season:
path = os.path.join(path, 'Season %s' % season)
return path
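# Illustrative sketch (added for clarity, not part of the original add-on).
# The base path, title and ids below are assumed for illustration only.
def _lib_tools_sketch():
    folder = lib_tools.make_path('/library/tv', 'Some Show', '2016', season='1')
    nfo = lib_tools.nfo_url('tv', {'tvdb': '123456'})
    # folder -> '/library/tv/Some Show (2016)/Season 1' on a POSIX filesystem
    # nfo    -> 'http://thetvdb.com/?tab=series&id=123456'
    return folder, nfo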
class libmovies:
def __init__(self):
self.library_folder = os.path.join(control.transPath(control.setting('library.movie')), '')
self.check_setting = control.setting('library.check_movie') or 'false'
self.library_setting = control.setting('library.update') or 'true'
self.dupe_setting = control.setting('library.check') or 'true'
self.infoDialog = False
def add(self, name, title, year, imdb, tmdb, range=False):
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
try:
if not self.dupe_setting == 'true': raise Exception()
id = [imdb, tmdb] if not tmdb == '0' else [imdb]
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter":{"or": [{"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}, {"field": "year", "operator": "is", "value": "%s"}]}, "properties" : ["imdbnumber", "originaltitle", "year"]}, "id": 1}' % (year, str(int(year)+1), str(int(year)-1)))
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['movies']
lib = [i for i in lib if str(i['imdbnumber']) in id or (i['originaltitle'].encode('utf-8') == title and str(i['year']) == year)][0]
except:
lib = []
files_added = 0
try:
if not lib == []: raise Exception()
if self.check_setting == 'true':
src = lib_tools.check_sources(title, year, imdb, None, None, None, None, None)
if not src: raise Exception()
self.strmFile({'name': name, 'title': title, 'year': year, 'imdb': imdb, 'tmdb': tmdb})
files_added += 1
except:
pass
if range == True: return
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
control.execute('UpdateLibrary(video)')
def range(self, url):
control.idle()
yes = control.yesnoDialog(control.lang(32555).encode('utf-8'), '', '')
if not yes: return
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
from resources.lib.indexers import movies
items = movies.movies().get(url, idx=False)
if items == None: items = []
for i in items:
try:
if xbmc.abortRequested == True: return sys.exit()
self.add('%s (%s)' % (i['title'], i['year']), i['title'], i['year'], i['imdb'], i['tmdb'], range=True)
except:
pass
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo'):
control.execute('UpdateLibrary(video)')
def strmFile(self, i):
try:
name, title, year, imdb, tmdb = i['name'], i['title'], i['year'], i['imdb'], i['tmdb']
sysname, systitle = urllib.quote_plus(name), urllib.quote_plus(title)
transtitle = cleantitle.normalize(title.translate(None, '\/:*?"<>|'))
content = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s' % (sys.argv[0], sysname, systitle, year, imdb, tmdb)
folder = lib_tools.make_path(self.library_folder, transtitle, year)
lib_tools.create_folder(folder)
lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename(transtitle) + '.strm'), content)
lib_tools.write_file(os.path.join(folder, 'movie.nfo'), lib_tools.nfo_url('movie', i))
except:
pass
class libtvshows:
def __init__(self):
self.library_folder = os.path.join(control.transPath(control.setting('library.tv')),'')
self.version = control.version()
self.check_setting = control.setting('library.check_episode') or 'false'
self.include_unknown = control.setting('library.include_unknown') or 'true'
self.library_setting = control.setting('library.update') or 'true'
self.dupe_setting = control.setting('library.check') or 'true'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.date = (self.datetime - datetime.timedelta(hours = 24)).strftime('%Y%m%d')
self.infoDialog = False
self.block = False
def add(self, tvshowtitle, year, imdb, tvdb, range=False):
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
from resources.lib.indexers import episodes
items = episodes.episodes().get(tvshowtitle, year, imdb, tvdb, idx=False)
try: items = [{'title': i['title'], 'year': i['year'], 'imdb': i['imdb'], 'tvdb': i['tvdb'], 'season': i['season'], 'episode': i['episode'], 'tvshowtitle': i['tvshowtitle'], 'premiered': i['premiered']} for i in items]
except: items = []
try:
if not self.dupe_setting == 'true': raise Exception()
if items == []: raise Exception()
id = [items[0]['imdb'], items[0]['tvdb']]
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}')
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['tvshows']
lib = [i['title'].encode('utf-8') for i in lib if str(i['imdbnumber']) in id or (i['title'].encode('utf-8') == items[0]['tvshowtitle'] and str(i['year']) == items[0]['year'])][0]
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}' % lib)
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['episodes']
lib = ['S%02dE%02d' % (int(i['season']), int(i['episode'])) for i in lib]
items = [i for i in items if not 'S%02dE%02d' % (int(i['season']), int(i['episode'])) in lib]
except:
pass
files_added = 0
for i in items:
try:
if xbmc.abortRequested == True: return sys.exit()
if self.check_setting == 'true':
if i['episode'] == '1':
self.block = True
src = lib_tools.check_sources(i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered'])
if src: self.block = False
if self.block == True: raise Exception()
premiered = i.get('premiered', '0')
if (premiered != '0' and int(re.sub('[^0-9]', '', str(premiered))) > int(self.date)) or (premiered == '0' and not self.include_unknown):
continue
self.strmFile(i)
files_added += 1
except:
pass
if range == True: return
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
control.execute('UpdateLibrary(video)')
def range(self, url):
control.idle()
yes = control.yesnoDialog(control.lang(32555).encode('utf-8'), '', '')
if not yes: return
if not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32552).encode('utf-8'), time=10000000)
self.infoDialog = True
from resources.lib.indexers import tvshows
items = tvshows.tvshows().get(url, idx=False)
if items == None: items = []
for i in items:
try:
if xbmc.abortRequested == True: return sys.exit()
self.add(i['title'], i['year'], i['imdb'], i['tvdb'], range=True)
except:
pass
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo'):
control.execute('UpdateLibrary(video)')
def strmFile(self, i):
try:
title, year, imdb, tvdb, season, episode, tvshowtitle, premiered = i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered']
episodetitle = urllib.quote_plus(title)
systitle, syspremiered = urllib.quote_plus(tvshowtitle), urllib.quote_plus(premiered)
transtitle = cleantitle.normalize(tvshowtitle.translate(None, '\/:*?"<>|'))
content = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&date=%s' % (sys.argv[0], episodetitle, year, imdb, tvdb, season, episode, systitle, syspremiered)
folder = lib_tools.make_path(self.library_folder, transtitle, year)
if not os.path.isfile(os.path.join(folder, 'tvshow.nfo')):
lib_tools.create_folder(folder)
lib_tools.write_file(os.path.join(folder, 'tvshow.nfo'), lib_tools.nfo_url('tv', i))
folder = lib_tools.make_path(self.library_folder, transtitle, year, season)
lib_tools.create_folder(folder)
lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename('%s S%02dE%02d' % (transtitle, int(season), int(episode))) + '.strm'), content)
except:
pass
class libepisodes:
def __init__(self):
self.library_folder = os.path.join(control.transPath(control.setting('library.tv')),'')
self.library_setting = control.setting('library.update') or 'true'
self.include_unknown = control.setting('library.include_unknown') or 'true'
self.property = '%s_service_property' % control.addonInfo('name').lower()
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.date = (self.datetime - datetime.timedelta(hours = 24)).strftime('%Y%m%d')
self.infoDialog = False
def update(self, query=None, info='true'):
if not query == None: control.idle()
try:
items = []
season, episode = [], []
show = [os.path.join(self.library_folder, i) for i in control.listDir(self.library_folder)[0]]
for s in show:
try: season += [os.path.join(s, i) for i in control.listDir(s)[0]]
except: pass
for s in season:
try: episode.append([os.path.join(s, i) for i in control.listDir(s)[1] if i.endswith('.strm')][-1])
except: pass
for file in episode:
try:
file = control.openFile(file)
read = file.read()
read = read.encode('utf-8')
file.close()
if not read.startswith(sys.argv[0]): raise Exception()
params = dict(urlparse.parse_qsl(read.replace('?','')))
try: tvshowtitle = params['tvshowtitle']
except: tvshowtitle = None
try: tvshowtitle = params['show']
except: pass
if tvshowtitle == None or tvshowtitle == '': raise Exception()
year, imdb, tvdb = params['year'], params['imdb'], params['tvdb']
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
try: tmdb = params['tmdb']
except: tmdb = '0'
items.append({'tvshowtitle': tvshowtitle, 'year': year, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb})
except:
pass
items = [i for x, i in enumerate(items) if i not in items[x + 1:]]
if len(items) == 0: raise Exception()
except:
return
try:
lib = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["imdbnumber", "title", "year"]}, "id": 1}')
lib = unicode(lib, 'utf-8', errors='ignore')
lib = json.loads(lib)['result']['tvshows']
except:
return
if info == 'true' and not control.condVisibility('Window.IsVisible(infodialog)') and not control.condVisibility('Player.HasVideo'):
control.infoDialog(control.lang(32553).encode('utf-8'), time=10000000)
self.infoDialog = True
try:
control.makeFile(control.dataPath)
dbcon = database.connect(control.libcacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS tvshows (""id TEXT, ""items TEXT, ""UNIQUE(id)"");")
except:
return
try:
from resources.lib.indexers import episodes
except:
return
files_added = 0
# __init__ doesn't get called from services so self.date never gets updated and new episodes are not added to the library
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.date = (self.datetime - datetime.timedelta(hours = 24)).strftime('%Y%m%d')
for item in items:
it = None
if xbmc.abortRequested == True: return sys.exit()
try:
dbcur.execute("SELECT * FROM tvshows WHERE id = '%s'" % item['tvdb'])
fetch = dbcur.fetchone()
it = eval(fetch[1].encode('utf-8'))
except:
pass
try:
if not it == None: raise Exception()
it = episodes.episodes().get(item['tvshowtitle'], item['year'], item['imdb'], item['tvdb'], idx=False)
status = it[0]['status'].lower()
it = [{'title': i['title'], 'year': i['year'], 'imdb': i['imdb'], 'tvdb': i['tvdb'], 'season': i['season'], 'episode': i['episode'], 'tvshowtitle': i['tvshowtitle'], 'premiered': i['premiered']} for i in it]
if status == 'continuing': raise Exception()
dbcur.execute("INSERT INTO tvshows Values (?, ?)", (item['tvdb'], repr(it)))
dbcon.commit()
except:
pass
try:
id = [item['imdb'], item['tvdb']]
if not item['tmdb'] == '0': id += [item['tmdb']]
ep = [x['title'].encode('utf-8') for x in lib if str(x['imdbnumber']) in id or (x['title'].encode('utf-8') == item['tvshowtitle'] and str(x['year']) == item['year'])][0]
ep = control.jsonrpc('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"filter":{"and": [{"field": "tvshow", "operator": "is", "value": "%s"}]}, "properties": ["season", "episode"]}, "id": 1}' % ep)
ep = unicode(ep, 'utf-8', errors='ignore')
ep = json.loads(ep).get('result', {}).get('episodes', {})
ep = [{'season': int(i['season']), 'episode': int(i['episode'])} for i in ep]
ep = sorted(ep, key=lambda x: (x['season'], x['episode']))[-1]
num = [x for x,y in enumerate(it) if str(y['season']) == str(ep['season']) and str(y['episode']) == str(ep['episode'])][-1]
it = [y for x,y in enumerate(it) if x > num]
if len(it) == 0: continue
except:
continue
for i in it:
try:
if xbmc.abortRequested == True: return sys.exit()
premiered = i.get('premiered', '0')
if (premiered != '0' and int(re.sub('[^0-9]', '', str(premiered))) > int(self.date)) or (premiered == '0' and not self.include_unknown):
continue
libtvshows().strmFile(i)
files_added += 1
except:
pass
if self.infoDialog == True:
control.infoDialog(control.lang(32554).encode('utf-8'), time=1)
if self.library_setting == 'true' and not control.condVisibility('Library.IsScanningVideo') and files_added > 0:
control.execute('UpdateLibrary(video)')
def service(self):
try:
lib_tools.create_folder(os.path.join(control.transPath(control.setting('library.movie')), ''))
lib_tools.create_folder(os.path.join(control.transPath(control.setting('library.tv')), ''))
except:
pass
try:
control.makeFile(control.dataPath)
dbcon = database.connect(control.libcacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS service (""setting TEXT, ""value TEXT, ""UNIQUE(setting)"");")
dbcur.execute("SELECT * FROM service WHERE setting = 'last_run'")
fetch = dbcur.fetchone()
if fetch == None:
serviceProperty = "1970-01-01 23:59:00.000000"
dbcur.execute("INSERT INTO service Values (?, ?)", ('last_run', serviceProperty))
dbcon.commit()
else:
serviceProperty = str(fetch[1])
dbcon.close()
except:
try: return dbcon.close()
except: return
try: control.window.setProperty(self.property, serviceProperty)
except: return
while not xbmc.abortRequested:
try:
serviceProperty = control.window.getProperty(self.property)
t1 = datetime.timedelta(hours=6)
t2 = datetime.datetime.strptime(serviceProperty, '%Y-%m-%d %H:%M:%S.%f')
t3 = datetime.datetime.now()
check = abs(t3 - t2) > t1
if check == False: raise Exception()
if (control.player.isPlaying() or control.condVisibility('Library.IsScanningVideo')): raise Exception()
serviceProperty = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
control.window.setProperty(self.property, serviceProperty)
try:
dbcon = database.connect(control.libcacheFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS service (""setting TEXT, ""value TEXT, ""UNIQUE(setting)"");")
dbcur.execute("DELETE FROM service WHERE setting = 'last_run'")
dbcur.execute("INSERT INTO service Values (?, ?)", ('last_run', serviceProperty))
dbcon.commit()
dbcon.close()
except:
try: dbcon.close()
except: pass
if not control.setting('library.service.update') == 'true': raise Exception()
info = control.setting('library.service.notification') or 'true'
self.update(info=info)
except:
pass
control.sleep(10000)
|
|
# Copyright (c) 2012-2013 Andreas Sembrant
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sembrant
import os.path, sys, argparse
import pyscarphase.proto.data
import pyscarphase.proto.meta
import pyscarphase.cmd
import pyscarphase.util.config
import pyscarphase.util.demultiplexer
import pyscarphase.scarphase_dump
import pypowersleuth.model.model
class DumpCmd(pyscarphase.cmd.Cmd):
def __init__(self, args):
#
pyscarphase.cmd.Cmd.__init__(self)
#
self.parse_arguments(args)
def parse_arguments(self, args):
#
parser = argparse.ArgumentParser(
prog=' '.join(args[0:2]),
description='Dump data to other formats.'
)
subparsers = parser.add_subparsers(title="What to dump")
def add_common_args(parser):
'''Add common arguments to all "dump" sub commands.'''
parser.add_argument(
"profile",
help="Input profile."
)
parser.add_argument(
"--thread", "-t",
type=int,
required=True,
help="Thread to dump."
)
parser.add_argument(
"--format",
choices=[ "csv", "prettytable" ], default="prettytable",
help="Output format."
)
parser.add_argument(
"--frequencies", "-f",
required=True,
help="Target frequencies."
)
parser.add_argument(
"--profile-frequency",
type=float,
required=True,
help="Profile frequency."
)
parser.add_argument(
"--cpu-info",
required=True,
help="CPU model."
)
parser.add_argument(
"--output-file", "-o",
type=argparse.FileType('w'), default=sys.stdout,
help="Output file"
)
def conf_dump_windows():
# Add new parser
sub_parser = subparsers.add_parser(
'windows',
help="Dump windows")
#
sub_parser.set_defaults(func=self.dump_windows)
#
add_common_args(sub_parser)
conf_dump_windows()
self.args = parser.parse_args(args[2:])
def run(self):
self.args.func()
def dump_windows(self):
'''Dump windows.
'''
# Load meta profile
profile = pyscarphase.proto.meta.load_profile(self.args.profile)
#
cpuinfo = pyscarphase.util.config.Config(self.args.cpu_info)
# Load right model
model = pypowersleuth.model.model.load_model(cpuinfo, profile)
header = [ "WID", "PID"]
frequencies = []
for f in self.args.frequencies.split(','):
header += [ "Power (W) at %sGHz" % f, "Performance (s) at %sGHz" % f, "Energy (J) at %sGHz" % f]
frequencies.append(float(f))
if self.args.format == "csv":
writer = pyscarphase.scarphase_dump.CsvOutputWrapper(self.args.output_file, header)
else:
writer = pyscarphase.scarphase_dump.PrettyTableWrapper(self.args.output_file, header)
# Get thread to dump
thread = profile.threads[self.args.thread]
# Open a reader to that thread's datafile
reader = pyscarphase.proto.data.DataReader(
os.path.join(
os.path.split(self.args.profile)[0],
thread.profile.filename
),
uuid=thread.profile.uuid
)
dm = pyscarphase.util.demultiplexer.Demultiplexer(reader)
for i, w in enumerate(dm.read()):
row = [i, w.phase]
# Get performance counter data
values = []
for c in profile.performance_counters:
values.append(w.value(c.id)[0])
#
for f in frequencies:
_values = model.fix_counter_values(
f, self.args.profile_frequency, values
)
# Estimate executed cycles
c_est = model.estimate_performance(
f, self.args.profile_frequency, _values
)
# Convert to seconds
performance = model.cycles2seconds(f, c_est)
# Estimate power
power = model.estimate_power(f, c_est, _values)
# Add data
row.append(power)
row.append(performance)
row.append(power * performance)
writer.write_row(row)
def run(args):
    DumpCmd(args).run()
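# A minimal sketch of how this sub-command would be invoked from the command
# line, assuming the top-level "scarphase" launcher dispatches "dump" to the
# run() function above (profile and cpu-info file names are hypothetical):
#
#   scarphase dump windows profile.meta \
#       --thread 0 \
#       --frequencies 1.6,2.0,2.4 \
#       --profile-frequency 2.4 \
#       --cpu-info cpu.cfg \
#       --format csv \
#       --output-file windows.csv
#
# This mirrors the arguments registered in add_common_args(); one power,
# performance and energy column is emitted per requested target frequency.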
|
|
# pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCObject(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False,
field_selector=None):
''' Constructor for OpenshiftOC '''
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
self.name = name
self.selector = selector
self.field_selector = field_selector
def get(self):
'''return a kind by name '''
results = self._get(self.kind, name=self.name, selector=self.selector, field_selector=self.field_selector)
if (results['returncode'] != 0 and 'stderr' in results and
'\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
'''delete the object'''
results = self._delete(self.kind, name=self.name, selector=self.selector)
if (results['returncode'] != 0 and 'stderr' in results and
'\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def create(self, files=None, content=None):
'''
Create a config
        NOTE: This creates the first file OR the first content.
TODO: Handle all files and content passed in
'''
if files:
return self._create(files[0])
# pylint: disable=no-member
# The purpose of this change is twofold:
# - we need a check to only use the ruamel specific dumper if ruamel is loaded
# - the dumper or the flow style change is needed so openshift is able to parse
# the resulting yaml, at least until gopkg.in/yaml.v2 is updated
if hasattr(yaml, 'RoundTripDumper'):
content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper)
else:
content['data'] = yaml.safe_dump(content['data'], default_flow_style=False)
content_file = Utils.create_tmp_files_from_contents(content)[0]
return self._create(content_file['path'])
# pylint: disable=too-many-function-args
def update(self, files=None, content=None, force=False):
'''update a current openshift object
This receives a list of file names or content
and takes the first and calls replace.
TODO: take an entire list
'''
if files:
return self._replace(files[0], force)
if content and 'data' in content:
content = content['data']
return self.update_content(content, force)
def update_content(self, content, force=False):
'''update an object through using the content param'''
return self._replace_content(self.kind, self.name, content, force=force)
def needs_update(self, files=None, content=None, content_type='yaml'):
''' check to see if we need to update '''
objects = self.get()
if objects['returncode'] != 0:
return objects
data = None
if files:
data = Utils.get_resource_file(files[0], content_type)
elif content and 'data' in content:
data = content['data']
else:
data = content
# if equal then no need. So not equal is True
return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode=False):
'''perform the ansible idempotent code'''
ocobj = OCObject(params['kind'],
params['namespace'],
params['name'],
params['selector'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'],
all_namespaces=params['all_namespaces'],
field_selector=params['field_selector'])
state = params['state']
api_rval = ocobj.get()
#####
# Get
#####
if state == 'list':
if api_rval['returncode'] != 0:
return {'changed': False, 'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
(len(api_rval['results']) == 0 or \
('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
# create/update: Must define a name beyond this point
if not params['name']:
return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
# Create it here
api_rval = ocobj.create(params['files'], params['content'])
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = ocobj.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# Remove files
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
# if a file path is passed, use it.
update = ocobj.needs_update(params['files'], params['content'])
if not isinstance(update, bool):
return {'failed': True, 'msg': update}
# No changes
if not update:
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocobj.update(params['files'],
params['content'],
params['force'])
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = ocobj.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
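# Rough sketch of the params dict run_ansible() expects, as assembled by the
# surrounding Ansible module boilerplate. The keys are inferred from the
# lookups above; the values here are purely illustrative:
#
#   params = {
#       'kind': 'configmap',
#       'namespace': 'default',
#       'name': 'my-config',
#       'selector': None,
#       'field_selector': None,
#       'kubeconfig': '/etc/origin/master/admin.kubeconfig',
#       'debug': False,
#       'all_namespaces': False,
#       'state': 'present',
#       'files': None,
#       'content': {'data': {'apiVersion': 'v1', 'kind': 'ConfigMap'}},
#       'force': False,
#       'delete_after': False,
#   }
#   result = OCObject.run_ansible(params, check_mode=False)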
|
|
"""
model predicting whether an airbnb listing is fairly priced or not
"""
#%matplotlib inline
#%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
#import seaborn as sns
import os
import sys
#import pylab
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import Ridge, RidgeCV, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
import xgboost as xgb
from sklearn.externals import joblib
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import psycopg2
from keras.models import Sequential
from keras.layers import Dense, Dropout
home_folder = os.environ["home_folder"]
sys.path.append(os.path.join(home_folder, "airbnb_app/airbnb/web_app/flaskexample/"))
import airbnb_pipeline
def rmse(y_true, y_pred):
return(np.sqrt(metrics.mean_squared_error(y_true, y_pred)))
def one_hot_encode_amenities(train):
"""
    amenities come in an awkward string form; this function one-hot encodes them
"""
train.amenities = train.amenities.str.replace("[{}]", "")
amenities = ",".join(train.amenities)
amenities = np.unique(amenities.split(","))
amenities_dict = dict(enumerate(amenities))
train.amenities = train.amenities.str.split(",")
amenity_ohe = [[ame in amenity_list for ame in amenities]
for amenity_list in train.amenities]
amenity_ohe = 1*(np.array(amenity_ohe))
return amenity_ohe
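# Illustrative sketch (toy data, not the real listings table) of what
# one_hot_encode_amenities() produces; columns follow the sorted unique
# amenity names across all rows:
#
#   toy = pd.DataFrame({"amenities": ["{TV,Wifi}", "{Wifi}"]})
#   one_hot_encode_amenities(toy)
#   # -> array([[1, 1],
#   #           [0, 1]])   # columns: ["TV", "Wifi"]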
def extract_features_price_model(train,add_BOW = False):
"""
encodes categorical variables, concatenates with numeric and amenities
optionally add a sparse bag of words feature matrix
"""
scale = StandardScaler()
    #get_dummies one hot encodes categorical feats and leaves numeric feats alone
    #(note: this uses the module-level train_num_cat selection defined below, not the train argument)
    X_num = pd.get_dummies(train_num_cat)
X_num = scale.fit_transform(X_num)
amenity_ohe = one_hot_encode_amenities(train)
#whether to add BOW to final features - helps marginally:
if add_BOW == True:
train_text = train[["name", "summary", "amenities"]]
#keep min_df large here ~ 300 otherwise detrimental to model
vect = TfidfVectorizer(stop_words = "english", min_df = 300)
X_text = vect.fit_transform(train["summary"])
X_full = np.hstack((X_num, amenity_ohe, X_text.toarray()))
else:
X_full = np.hstack((X_num, amenity_ohe))
return X_full
def validate_model(model, data, y):
"""
splits the data, fits the model and returns the rmse on val set
"""
#TODO: make it return MAE or R^2 here
X_tr, X_val, y_tr, y_val = train_test_split(data, y, random_state = 3)
preds = model.fit(X_tr, y_tr).predict(X_val)
return rmse(preds, y_val)
#~~~~~~~~~~~~~~~
#POSTGRES:
#~~~~~~~~~~~~~~
home_folder = os.environ["home_folder"]
dbname = os.environ["dbname"]
username = os.environ["username"]
if sys.platform == "linux":
password = os.environ["password"]
if sys.platform == "linux":
connect_str = "dbname='%s' user='%s' host='localhost' password='%s'"%(dbname,username,password)
con = psycopg2.connect(connect_str)
    engine = create_engine('postgres://%s:%s@localhost/%s' % (username, password, dbname))
else:
con = psycopg2.connect(database = dbname, user = username)
engine = create_engine('postgres://%s@localhost/%s'%(username,dbname))
con = psycopg2.connect(database = dbname, user = username)
sql_query = """
SELECT id, neighbourhood_cleansed,
bedrooms, is_location_exact,
property_type, room_type,
city, latitude,
longitude, accommodates,
review_scores_location,
review_scores_rating,
number_of_reviews,
amenities, name,
minimum_nights, price FROM listings;
"""
train = pd.read_sql_query(sql_query, con, index_col = "id")
#~~~~~~~~
#MODELS:
#~~~~~~~
y = train["price"]
train_num_cat = train[["neighbourhood_cleansed",
"bedrooms",
"is_location_exact",
"property_type",
"room_type",
"city",
"latitude",
"longitude",
"accommodates",
"review_scores_location",
"review_scores_rating",
"number_of_reviews",
"minimum_nights"]]
# train_num_cat["price"] = train["price"]
# train_num_cat = train_num_cat.dropna()
# train_num_cat.to_csv("aibnb_for_R.csv")
X_full = extract_features_price_model(train)
X_tr, X_val, y_tr, y_val = train_test_split(X_full, y, random_state = 3)
#baseline score:
always_mean_preds = len(y_val)*[y_tr.mean()]
rmse(always_mean_preds, y_val)
from sklearn import metrics
metrics.mean_absolute_error(always_mean_preds, y_val)
model = Ridge()
#model = RandomForestRegressor(n_estimators = 50)
model = xgb.XGBRegressor(learning_rate = 0.1, max_depth = 6, n_estimators = 150)
validate_model(model = model, data = X_full, y = y)
preds_xgb = model.predict(X_val)
metrics.mean_absolute_error(preds_xgb, y_val)
#predicting:
train["preds"] = model.predict(X_full)
train["diff"] = train["preds"] - train["price"]
#write to train now with prediction.
listings = pd.read_sql_query("select * from listings", con, index_col = "id")
listings["preds"] = model.predict(X_full)
listings["diff"] = listings["preds"] - listings["price"]
train.to_sql("listings", con = engine, if_exists = "replace")
#~~~~~~~~~~~~
#NEURAL NETS:
#~~~~~~~~~~~
model = Sequential()
model.add(Dense(512, activation = "relu", input_dim = X_tr.shape[1]))
model.add(Dropout(0.5))
#model.add(Dense(256, activation = "relu"))
#model.add(Dropout(0.5))
model.add(Dense(1))
model.compile(loss = "mse", optimizer = "adam")
hist = model.fit(X_tr, y_tr, validation_data = (X_val, y_val), nb_epoch = 20, batch_size = 128, verbose = 0)
pd.DataFrame(hist.history).plot()
preds_nn = model.predict(X_val)
pd.Series(preds_nn[:, 0]).hist()
#average the neural net and xgboost predictions on the validation set
preds_ensemble = preds_nn[:, 0]*0.5 + preds_xgb*0.5
rmse(preds_ensemble, y_val)
#GOALS:
#Have rmse be half the baseline model
#have R^2 of at least .70
#baseline:88.4 RMSE
#Ridge: 53 RMSE
#RF: 50.5 RMSE
#NN: 50.7 RMSE
#xgboost: 49.1 RMSE
#xgboost+NN: 49.5
#Baseline(predict the mean price): 88.4 RMSE
#Ridge: 53 RMSE
#Random Forest: 50.5 RMSE
#import pandas
#import matplotlib.pyplot as plt
# %matplotlib inline
# import seaborn
# import pandas as pd
# (pd.Series([66, 36, 31], index = ["Mean Prediction", "Ridge Regression", "xgboost+Neural Net"]).sort_values()
# .plot(kind = "barh", title = "Mean Absolute Error in Dollars on Test Set (smaller is better)"))
|
|
import unittest
from mock import patch
import responses
from notification_backend.notification_threads import NotificationThreads
import json
import jwt
from boto3.exceptions import Boto3Error
class TestFindThread(unittest.TestCase):
def setUp(self):
patcher1 = patch('notification_backend.notification_threads.dynamodb_results') # NOQA
self.addCleanup(patcher1.stop)
self.mock_db_results = patcher1.start()
patcher2 = patch('notification_backend.notification_threads.dynamodb_new_item') # NOQA
self.addCleanup(patcher2.stop)
self.mock_db_new_item = patcher2.start()
self.jwt_signing_secret = "shhsekret"
self.token = jwt.encode({"sub": "333333"},
self.jwt_signing_secret,
algorithm='HS256')
self.lambda_event = {
"jwt_signing_secret": self.jwt_signing_secret,
"bearer_token": "Bearer %s" % self.token,
"payload": {},
"resource-path": "/notification/threads/{thread-id}",
"threadid": "12345678",
"notification_dynamodb_endpoint_url": "http://example.com",
"notification_user_notification_dynamodb_table_name": "fakethreads"
}
def test_invalid_jwt(self):
self.lambda_event['jwt_signing_secret'] = "shh"
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 401)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
401
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"Invalid JSON Web Token"
)
def test_invalid_userid(self):
self.token = jwt.encode({"subs": "user1"},
self.jwt_signing_secret,
algorithm='HS256')
self.lambda_event['bearer_token'] = "Bearer %s" % self.token
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 401)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
401
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"sub field not present in JWT"
)
def test_empty_auth_header(self):
self.lambda_event['bearer_token'] = ""
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 401)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
401
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"Invalid JSON Web Token"
)
def test_datastore_query_error(self):
self.mock_db_results.side_effect = Boto3Error
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 500)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
500
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"Error querying for thread 12345678 from the datastore"
)
        self.assertTrue(len(self.mock_db_results.mock_calls) > 0)
def test_single_dynamodb_result(self):
result = {
"thread_id": 12345678,
"thread_url": "http://api.example.com/fake/12345678",
"thread_subscription_url": "http://api.example.com/fake/12345678/subscribe", # NOQA
"reason": "subscribed",
"updated_at": 1460443217,
"subject_title": "Fake Issue",
"subject_url": "http://example.com/fake/12345678",
"subject_type": "Issue",
"repository_owner": "octocat",
"repository_name": "left-pad"
}
self.mock_db_results.return_value.next.return_value = result
t = NotificationThreads(self.lambda_event)
result_json = t.process_thread_event("find_thread")
result_attrs = result_json.get('data').get('data').get('attributes')
self.assertEqual(result_json.get('http_status'), 200)
self.assertEqual(result_json.get('data').get('data').get('type'), "threads") # NOQA
self.assertEqual(result_json.get('data').get('data').get('id'), 12345678) # NOQA
self.assertEqual(result_attrs.get('thread-url'), "http://api.example.com/fake/12345678") # NOQA
self.assertEqual(result_attrs.get('thread-subscription-url'), "http://api.example.com/fake/12345678/subscribe") # NOQA
self.assertEqual(result_attrs.get('reason'), "subscribed")
self.assertEqual(result_attrs.get('updated-at'), 1460443217)
        self.assertTrue(len(self.mock_db_results.mock_calls) > 0)
@responses.activate
def test_github_api_400(self):
responses.add(**{
'method': responses.GET,
'url': 'https://api.github.com/notifications/threads/12345678',
'body': '{"error": "message"}',
'status': 400
})
self.mock_db_results.side_effect = StopIteration
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 404)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
404
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"Could not find info for thread 12345678"
)
        self.assertTrue(len(self.mock_db_results.mock_calls) > 0)
@responses.activate
def test_github_api_invalid_json(self):
responses.add(**{
'method': responses.GET,
'url': 'https://api.github.com/notifications/threads/12345678',
'body': 'fake json',
'status': 200
})
self.mock_db_results.side_effect = StopIteration
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 404)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
404
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"Could not find info for thread 12345678"
)
        self.assertTrue(len(self.mock_db_results.mock_calls) > 0)
@responses.activate
def test_error_persisting_record(self):
output = {
"id": "12345678",
"reason": "manual",
"updated_at": "2016-04-12T01:40:17Z",
"subject": {
"title": "Support AWS APIGateway",
"url": "https://api.github.com/repos/hashicorp/terraform/issues/3675", # NOQA
"latest_comment_url": "https://api.github.com/repos/hashicorp/terraform/issues/comments/208658994", # NOQA
"type": "Issue"
},
"repository": {
"id": 17728164,
"name": "terraform",
"full_name": "hashicorp/terraform",
"owner": {
"login": "hashicorp"
}
},
"url": "https://api.github.com/notifications/threads/12345678",
"subscription_url": "https://api.github.com/notifications/threads/12345678/subscription" # NOQA
}
responses.add(**{
'method': responses.GET,
'url': 'https://api.github.com/notifications/threads/12345678',
'body': json.dumps(output),
'status': 200
})
self.mock_db_results.side_effect = StopIteration
self.mock_db_new_item.side_effect = Boto3Error
t = NotificationThreads(self.lambda_event)
with self.assertRaises(TypeError) as cm:
t.process_thread_event("find_thread")
result_json = json.loads(str(cm.exception))
self.assertEqual(result_json.get('http_status'), 500)
self.assertEqual(
result_json.get('data').get('errors')[0].get('status'),
500
)
self.assertEqual(
result_json.get('data').get('errors')[0].get('detail'),
"Error writing info for thread 12345678 to the datastore"
)
        self.assertTrue(len(self.mock_db_results.mock_calls) > 0)
        self.assertTrue(len(self.mock_db_new_item.mock_calls) > 0)
@responses.activate
def test_github_valid_output(self):
output = {
"id": "12345678",
"reason": "manual",
"updated_at": "2016-04-12T01:40:17Z",
"subject": {
"title": "Support AWS APIGateway",
"url": "https://api.github.com/repos/hashicorp/terraform/issues/3675", # NOQA
"latest_comment_url": "https://api.github.com/repos/hashicorp/terraform/issues/comments/208658994", # NOQA
"type": "Issue"
},
"repository": {
"id": 17728164,
"name": "terraform",
"full_name": "hashicorp/terraform",
"owner": {
"login": "hashicorp"
}
},
"url": "https://api.github.com/notifications/threads/12345678",
"subscription_url": "https://api.github.com/notifications/threads/12345678/subscription" # NOQA
}
responses.add(**{
'method': responses.GET,
'url': 'https://api.github.com/notifications/threads/12345678',
'body': json.dumps(output),
'status': 200
})
self.mock_db_results.side_effect = StopIteration
t = NotificationThreads(self.lambda_event)
result_json = t.process_thread_event("find_thread")
self.assertEqual(result_json.get('http_status'), 200)
result_attrs = result_json.get('data').get('data').get('attributes')
self.assertEqual(result_json.get('http_status'), 200)
self.assertEqual(result_json.get('data').get('data').get('type'), "threads") # NOQA
self.assertEqual(result_json.get('data').get('data').get('id'), 12345678) # NOQA
self.assertEqual(result_attrs.get('thread-url'), "https://api.github.com/notifications/threads/12345678") # NOQA
self.assertEqual(result_attrs.get('thread-subscription-url'), "https://api.github.com/notifications/threads/12345678/subscription") # NOQA
self.assertEqual(result_attrs.get('reason'), "manual")
self.assertEqual(result_attrs.get('updated-at'), 1460425217)
tags = result_json.get('data').get('data').get('attributes').get('tags') # NOQA
self.assertEqual(len(tags), 4)
self.assertTrue('subscribed' in tags)
self.assertTrue('issue' in tags)
self.assertTrue('hashicorp' in tags)
self.assertTrue('terraform' in tags)
        self.assertTrue(len(self.mock_db_results.mock_calls) > 0)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from io import BytesIO
import numpy as np
import warnings
from .. import Variable
from ..core.pycompat import iteritems, OrderedDict, basestring
from ..core.utils import (Frozen, FrozenOrderedDict)
from ..core.indexing import NumpyIndexingAdapter
from .common import WritableCFDataStore, DataStorePickleMixin, BackendArray
from .netcdf3 import (is_valid_nc3_name, encode_nc3_attr_value,
encode_nc3_variable)
def _decode_string(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
return s
def _decode_attrs(d):
# don't decode _FillValue from bytes -> unicode, because we want to ensure
# that its type matches the data exactly
return OrderedDict((k, v if k == '_FillValue' else _decode_string(v))
for (k, v) in iteritems(d))
class ScipyArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
self.dtype = np.dtype(array.dtype.kind +
str(array.dtype.itemsize))
def get_array(self):
self.datastore.assert_open()
return self.datastore.ds.variables[self.variable_name].data
def __getitem__(self, key):
with self.datastore.ensure_open(autoclose=True):
data = NumpyIndexingAdapter(self.get_array())[key]
# Copy data if the source file is mmapped.
# This makes things consistent
# with the netCDF4 library by ensuring
# we can safely read arrays even
# after closing associated files.
copy = self.datastore.ds.use_mmap
return np.array(data, dtype=self.dtype, copy=copy)
def _open_scipy_netcdf(filename, mode, mmap, version):
import scipy.io
import gzip
# if the string ends with .gz, then gunzip and open as netcdf file
if isinstance(filename, basestring) and filename.endswith('.gz'):
try:
return scipy.io.netcdf_file(gzip.open(filename), mode=mode,
mmap=mmap, version=version)
except TypeError as e:
# TODO: gzipped loading only works with NetCDF3 files.
            if 'is not a valid NetCDF 3 file' in str(e):
raise ValueError('gzipped file loading only supports '
'NetCDF 3 files.')
else:
raise
if isinstance(filename, bytes) and filename.startswith(b'CDF'):
# it's a NetCDF3 bytestring
filename = BytesIO(filename)
try:
return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap,
version=version)
except TypeError as e: # netcdf3 message is obscure in this case
errmsg = e.args[0]
if 'is not a valid NetCDF 3 file' in errmsg:
msg = """
If this is a NetCDF4 file, you may need to install the
netcdf4 library, e.g.,
$ pip install netcdf4
"""
errmsg += msg
raise TypeError(errmsg)
else:
raise
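# Sketch of the inputs _open_scipy_netcdf() accepts (file names hypothetical;
# version=2 corresponds to the NETCDF3_64BIT default used by ScipyDataStore
# below):
#
#   _open_scipy_netcdf('data.nc', mode='r', mmap=None, version=2)     # plain path
#   _open_scipy_netcdf('data.nc.gz', mode='r', mmap=None, version=2)  # gunzipped on the fly
#   _open_scipy_netcdf(netcdf3_bytes, mode='r', mmap=None, version=2) # b'CDF...' bytestring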
class ScipyDataStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via scipy.io.netcdf.
This store has the advantage of being able to be initialized with a
    StringIO object, allowing for serialization without writing to disk.
It only supports the NetCDF3 file-format.
"""
def __init__(self, filename_or_obj, mode='r', format=None, group=None,
writer=None, mmap=None, autoclose=False):
import scipy
import scipy.io
if mode != 'r' and scipy.__version__ < '0.13': # pragma: no cover
warnings.warn('scipy %s detected; '
'the minimal recommended version is 0.13. '
'Older version of this library do not reliably '
'read and write files.'
% scipy.__version__, ImportWarning)
if group is not None:
raise ValueError('cannot save to a group with the '
'scipy.io.netcdf backend')
if format is None or format == 'NETCDF3_64BIT':
version = 2
elif format == 'NETCDF3_CLASSIC':
version = 1
else:
raise ValueError('invalid format for scipy.io.netcdf backend: %r'
% format)
opener = functools.partial(_open_scipy_netcdf,
filename=filename_or_obj,
mode=mode, mmap=mmap, version=version)
self.ds = opener()
self._autoclose = autoclose
self._isopen = True
self._opener = opener
self._mode = mode
super(ScipyDataStore, self).__init__(writer)
def open_store_variable(self, name, var):
with self.ensure_open(autoclose=False):
return Variable(var.dimensions, ScipyArrayWrapper(name, self),
_decode_attrs(var._attributes))
def get_variables(self):
with self.ensure_open(autoclose=False):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
with self.ensure_open(autoclose=True):
return Frozen(_decode_attrs(self.ds._attributes))
def get_dimensions(self):
with self.ensure_open(autoclose=True):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding['unlimited_dims'] = {
k for k, v in self.ds.dimensions.items() if v is None}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
with self.ensure_open(autoclose=False):
if name in self.dimensions:
raise ValueError('%s does not support modifying dimensions'
% type(self).__name__)
dim_length = length if not is_unlimited else None
self.ds.createDimension(name, dim_length)
def _validate_attr_key(self, key):
if not is_valid_nc3_name(key):
raise ValueError("Not a valid attribute name")
def set_attribute(self, key, value):
with self.ensure_open(autoclose=False):
self._validate_attr_key(key)
value = encode_nc3_attr_value(value)
setattr(self.ds, key, value)
def prepare_variable(self, name, variable, check_encoding=False,
unlimited_dims=None):
variable = encode_nc3_variable(variable)
if check_encoding and variable.encoding:
raise ValueError('unexpected encoding for scipy backend: %r'
% list(variable.encoding))
if unlimited_dims is not None and len(unlimited_dims) > 1:
raise ValueError('NETCDF3 only supports one unlimited dimension')
self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims)
data = variable.data
        # nb. this still creates a numpy array entirely in memory, even though
        # we don't write the data yet; scipy.io.netcdf does not support
        # incremental writes.
self.ds.createVariable(name, data.dtype, variable.dims)
scipy_var = self.ds.variables[name]
for k, v in iteritems(variable.attrs):
self._validate_attr_key(k)
setattr(scipy_var, k, v)
return scipy_var, data
def sync(self):
with self.ensure_open(autoclose=True):
super(ScipyDataStore, self).sync()
self.ds.flush()
def close(self):
self.ds.close()
self._isopen = False
def __exit__(self, type, value, tb):
self.close()
def __setstate__(self, state):
filename = state['_opener'].keywords['filename']
if hasattr(filename, 'seek'):
# it's a file-like object
# seek to the start of the file so scipy can read it
filename.seek(0)
super(ScipyDataStore, self).__setstate__(state)
self._isopen = True
|
|
from datetime import timedelta
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import models
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils import six
from django.utils.http import urlencode
from django.utils.http import int_to_base36, base36_to_int
from django.core.exceptions import ValidationError
from allauth.compat import OrderedDict
try:
from django.contrib.auth import update_session_auth_hash
except ImportError:
update_session_auth_hash = None
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from ..exceptions import ImmediateHttpResponse
from ..utils import (import_callable, valid_email_or_none,
get_user_model, get_request_param)
from . import signals
from .app_settings import EmailVerificationMethod
from . import app_settings
from .adapter import get_adapter
def get_next_redirect_url(request, redirect_field_name="next"):
"""
Returns the next URL to redirect to, if it was explicitly passed
via the request.
"""
redirect_to = get_request_param(request, redirect_field_name)
if not get_adapter().is_safe_url(redirect_to):
redirect_to = None
return redirect_to
def get_login_redirect_url(request, url=None, redirect_field_name="next"):
if url and callable(url):
# In order to be able to pass url getters around that depend
# on e.g. the authenticated state.
url = url()
redirect_url \
= (url
or get_next_redirect_url(request,
redirect_field_name=redirect_field_name)
or get_adapter().get_login_redirect_url(request))
return redirect_url
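# Sketch (hypothetical view code; the 'dashboard' URL name is made up): both
# forms are accepted because a callable url is resolved lazily above, which is
# useful when the redirect depends on state that only exists at request time.
#
#   get_login_redirect_url(request, url='/dashboard/')
#   get_login_redirect_url(request, url=lambda: reverse('dashboard'))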
_user_display_callable = None
def logout_on_password_change(request, user):
# Since it is the default behavior of Django to invalidate all sessions on
# password change, this function actually has to preserve the session when
# logout isn't desired.
if (update_session_auth_hash is not None and
not app_settings.LOGOUT_ON_PASSWORD_CHANGE):
update_session_auth_hash(request, user)
def default_user_display(user):
if app_settings.USER_MODEL_USERNAME_FIELD:
return getattr(user, app_settings.USER_MODEL_USERNAME_FIELD)
else:
return force_text(user)
def user_display(user):
global _user_display_callable
if not _user_display_callable:
f = getattr(settings, "ACCOUNT_USER_DISPLAY",
default_user_display)
_user_display_callable = import_callable(f)
return _user_display_callable(user)
def user_field(user, field, *args):
"""
Gets or sets (optional) user model fields. No-op if fields do not exist.
"""
if field and hasattr(user, field):
if args:
# Setter
v = args[0]
if v:
User = get_user_model()
v = v[0:User._meta.get_field(field).max_length]
setattr(user, field, v)
else:
# Getter
return getattr(user, field)
def user_username(user, *args):
return user_field(user, app_settings.USER_MODEL_USERNAME_FIELD, *args)
def user_email(user, *args):
return user_field(user, app_settings.USER_MODEL_EMAIL_FIELD, *args)
def perform_login(request, user, email_verification,
redirect_url=None, signal_kwargs=None,
signup=False):
"""
Keyword arguments:
signup -- Indicates whether or not sending the
email is essential (during signup), or if it can be skipped (e.g. in
case email verification is optional and we are only logging in).
"""
# Local users are stopped due to form validation checking
# is_active, yet, adapter methods could toy with is_active in a
# `user_signed_up` signal. Furthermore, social users should be
# stopped anyway.
if not user.is_active:
return HttpResponseRedirect(reverse('account_inactive'))
from .models import EmailAddress
has_verified_email = EmailAddress.objects.filter(user=user,
verified=True).exists()
if email_verification == EmailVerificationMethod.NONE:
pass
elif email_verification == EmailVerificationMethod.OPTIONAL:
# In case of OPTIONAL verification: send on signup.
if not has_verified_email and signup:
send_email_confirmation(request, user, signup=signup)
elif email_verification == EmailVerificationMethod.MANDATORY:
if not has_verified_email:
send_email_confirmation(request, user, signup=signup)
return HttpResponseRedirect(
reverse('account_email_verification_sent'))
try:
get_adapter().login(request, user)
response = HttpResponseRedirect(
get_login_redirect_url(request, redirect_url))
if signal_kwargs is None:
signal_kwargs = {}
signals.user_logged_in.send(sender=user.__class__,
request=request,
response=response,
user=user,
**signal_kwargs)
get_adapter().add_message(request,
messages.SUCCESS,
'account/messages/logged_in.txt',
{'user': user})
except ImmediateHttpResponse as e:
response = e.response
return response
def complete_signup(request, user, email_verification, success_url,
signal_kwargs=None):
if signal_kwargs is None:
signal_kwargs = {}
signals.user_signed_up.send(sender=user.__class__,
request=request,
user=user,
**signal_kwargs)
return perform_login(request, user,
email_verification=email_verification,
signup=True,
redirect_url=success_url,
signal_kwargs=signal_kwargs)
def cleanup_email_addresses(request, addresses):
"""
Takes a list of EmailAddress instances and cleans it up, making
sure only valid ones remain, without multiple primaries etc.
Order is important: e.g. if multiple primary e-mail addresses
exist, the first one encountered will be kept as primary.
"""
from .models import EmailAddress
adapter = get_adapter()
# Let's group by `email`
e2a = OrderedDict() # maps email to EmailAddress
primary_addresses = []
verified_addresses = []
primary_verified_addresses = []
for address in addresses:
# Pick up only valid ones...
email = valid_email_or_none(address.email)
if not email:
continue
# ... and non-conflicting ones...
if (app_settings.UNIQUE_EMAIL
and EmailAddress.objects
.filter(email__iexact=email)
.exists()):
continue
a = e2a.get(email.lower())
if a:
a.primary = a.primary or address.primary
a.verified = a.verified or address.verified
else:
a = address
a.verified = a.verified or adapter.is_email_verified(request,
a.email)
e2a[email.lower()] = a
if a.primary:
primary_addresses.append(a)
if a.verified:
primary_verified_addresses.append(a)
if a.verified:
verified_addresses.append(a)
# Now that we got things sorted out, let's assign a primary
if primary_verified_addresses:
primary_address = primary_verified_addresses[0]
elif verified_addresses:
# Pick any verified as primary
primary_address = verified_addresses[0]
elif primary_addresses:
# Okay, let's pick primary then, even if unverified
primary_address = primary_addresses[0]
elif e2a:
# Pick the first
        primary_address = list(e2a.values())[0]
else:
# Empty
primary_address = None
# There can only be one primary
for a in e2a.values():
a.primary = primary_address.email.lower() == a.email.lower()
return list(e2a.values()), primary_address
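# Sketch of the primary-selection order implemented above: a verified primary
# wins over a merely verified address, which wins over an unverified primary,
# which wins over the first address encountered. The addresses below are
# hypothetical:
#
#   addresses = [
#       EmailAddress(user=user, email='[email protected]', primary=True, verified=False),
#       EmailAddress(user=user, email='[email protected]', primary=False, verified=True),
#   ]
#   cleaned, primary = cleanup_email_addresses(request, addresses)
#   # primary is the '[email protected]' entry; it alone keeps primary=True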
def setup_user_email(request, user, addresses):
"""
Creates proper EmailAddress for the user that was just signed
up. Only sets up, doesn't do any other handling such as sending
out email confirmation mails etc.
"""
from .models import EmailAddress
assert EmailAddress.objects.filter(user=user).count() == 0
priority_addresses = []
# Is there a stashed e-mail?
adapter = get_adapter()
stashed_email = adapter.unstash_verified_email(request)
if stashed_email:
priority_addresses.append(EmailAddress(user=user,
email=stashed_email,
primary=True,
verified=True))
email = user_email(user)
if email:
priority_addresses.append(EmailAddress(user=user,
email=email,
primary=True,
verified=False))
addresses, primary = cleanup_email_addresses(request,
priority_addresses
+ addresses)
for a in addresses:
a.user = user
a.save()
EmailAddress.objects.fill_cache_for_user(user, addresses)
if (primary
and email
and email.lower() != primary.email.lower()):
user_email(user, primary.email)
user.save()
return primary
def send_email_confirmation(request, user, signup=False):
"""
E-mail verification mails are sent:
a) Explicitly: when a user signs up
b) Implicitly: when a user attempts to log in using an unverified
e-mail while EMAIL_VERIFICATION is mandatory.
Especially in case of b), we want to limit the number of mails
sent (consider a user retrying a few times), which is why there is
a cooldown period before sending a new mail.
"""
from .models import EmailAddress, EmailConfirmation
COOLDOWN_PERIOD = timedelta(minutes=3)
email = user_email(user)
if email:
try:
email_address = EmailAddress.objects.get_for_user(user, email)
if not email_address.verified:
send_email = not EmailConfirmation.objects \
.filter(sent__gt=now() - COOLDOWN_PERIOD,
email_address=email_address) \
.exists()
if send_email:
email_address.send_confirmation(request,
signup=signup)
else:
send_email = False
except EmailAddress.DoesNotExist:
send_email = True
email_address = EmailAddress.objects.add_email(request,
user,
email,
signup=signup,
confirm=True)
assert email_address
# At this point, if we were supposed to send an email we have sent it.
if send_email:
get_adapter().add_message(request,
messages.INFO,
'account/messages/'
'email_confirmation_sent.txt',
{'email': email})
if signup:
get_adapter().stash_user(request, user_pk_to_url_str(user))
def sync_user_email_addresses(user):
"""
Keep user.email in sync with user.emailaddress_set.
Under some circumstances the user.email may not have ended up as
an EmailAddress record, e.g. in the case of manually created admin
users.
"""
from .models import EmailAddress
email = user_email(user)
if email and not EmailAddress.objects.filter(user=user,
email__iexact=email).exists():
if app_settings.UNIQUE_EMAIL \
and EmailAddress.objects.filter(email__iexact=email).exists():
# Bail out
return
EmailAddress.objects.create(user=user,
email=email,
primary=False,
verified=False)
def filter_users_by_email(email):
"""Return list of users by email address
    Typically one user, at most just a few. First we look through the
    EmailAddress table, then the customisable User model table. Results are
    added together, avoiding SQL joins, and deduplicated.
"""
from .models import EmailAddress
User = get_user_model()
mails = EmailAddress.objects.filter(email__iexact=email)
users = [e.user for e in mails.prefetch_related('user')]
if app_settings.USER_MODEL_EMAIL_FIELD:
q_dict = {app_settings.USER_MODEL_EMAIL_FIELD + '__iexact': email}
users += list(User.objects.filter(**q_dict))
return list(set(users))
def passthrough_next_redirect_url(request, url, redirect_field_name):
assert url.find("?") < 0 # TODO: Handle this case properly
next_url = get_next_redirect_url(request, redirect_field_name)
if next_url:
url = url + '?' + urlencode({redirect_field_name: next_url})
return url
def user_pk_to_url_str(user):
"""
This should return a string.
"""
User = get_user_model()
if (hasattr(models, 'UUIDField')
and issubclass(type(User._meta.pk), models.UUIDField)):
if isinstance(user.pk, six.string_types):
return user.pk
return user.pk.hex
ret = user.pk
if isinstance(ret, six.integer_types):
ret = int_to_base36(user.pk)
return str(ret)
def url_str_to_user_pk(s):
User = get_user_model()
# TODO: Ugh, isn't there a cleaner way to determine whether or not
# the PK is a str-like field?
if getattr(User._meta.pk, 'rel', None):
pk_field = User._meta.pk.rel.to._meta.pk
else:
pk_field = User._meta.pk
if (hasattr(models, 'UUIDField')
and issubclass(type(pk_field), models.UUIDField)):
return s
try:
pk_field.to_python('a')
pk = s
except ValidationError:
pk = base36_to_int(s)
return pk
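# Round-trip sketch for the pk <-> URL helpers above: an integer pk is
# base36-encoded, while a UUID pk is passed through as its hex string.
#
#   user_pk_to_url_str(user)   # pk=42 -> '16'  (int_to_base36)
#   url_str_to_user_pk('16')   # -> 42 for an integer pk field (base36_to_int)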
|
|
from __future__ import absolute_import
from __future__ import print_function
from ..packages import six
import os
import shutil
from xml.etree import ElementTree as ET
if six.PY2:
try:
import arcpy
from arcpy import mapping
from arcpy import env
arcpyFound = True
except:
arcpyFound = False
########################################################################
def MXDtoFeatureServiceDef( mxd_path,
service_name=None,
tags=None,
description=None,
folder_name=None,
capabilities ='Query,Create,Update,Delete,Uploads,Editing,Sync',
maxRecordCount=1000,
server_type='MY_HOSTED_SERVICES',
url='http://www.arcgis.com'):
    """
    converts an MXD to a service definition
    Inputs:
        mxd_path - Path to the ArcMap Map Document(MXD)
        service_name - Name of the Feature Service
        tags - Tags for the service, if none, the tags from the MXD are used
        description - Summary for the Feature Service, if none, info from the MXD is used
        folder_name - Folder in the Data store
        capabilities - A comma delimited list of feature service capabilities 'Query,Create,Update,Delete,Uploads,Editing,Sync'
        maxRecordCount - The max returned record count for the feature service
        server_type - The type of connection or publishing server
               Values: ARCGIS_SERVER | FROM_CONNECTION_FILE | SPATIAL_DATA_SERVER | MY_HOSTED_SERVICES
    Output:
        Service Definition File - *.sd
    """
    if not arcpyFound:
        return
if not os.path.isabs(mxd_path):
        scriptPath = os.getcwd()
        mxd_path = os.path.join(scriptPath, mxd_path)
    mxd = mapping.MapDocument(mxd_path)
    mxd = _prep_mxd(mxd)
    res = {}
    # Derive defaults from the MXD before building the output paths so a
    # service_name of None cannot break the path construction below.
    if service_name is None:
        service_name = mxd.title.strip().replace(' ', '_')
    if tags is None:
        tags = mxd.tags.strip()
    if description is None:
        description = mxd.description.strip()
    sddraftFolder = env.scratchFolder + os.sep + "draft"
    sdFolder = env.scratchFolder + os.sep + "sd"
    sddraft = sddraftFolder + os.sep + service_name + ".sddraft"
    sd = sdFolder + os.sep + "%s.sd" % service_name
if os.path.isdir(sddraftFolder) == False:
os.makedirs(sddraftFolder)
else:
shutil.rmtree(sddraftFolder, ignore_errors=True)
os.makedirs(sddraftFolder)
if os.path.isfile(sddraft):
os.remove(sddraft)
res['service_name'] = service_name
res['tags'] = tags
res['description'] = description
analysis = mapping.CreateMapSDDraft(map_document=mxd,
out_sddraft=sddraft,
service_name=service_name,
server_type=server_type,
connection_file_path=None,
copy_data_to_server=True,
folder_name=folder_name,
summary=description,
tags=tags)
sddraft = _modify_sddraft(sddraft=sddraft,
capabilities=capabilities,
maxRecordCount=maxRecordCount,
url=url)
analysis = mapping.AnalyzeForSD(sddraft)
if os.path.isdir(sdFolder):
shutil.rmtree(sdFolder, ignore_errors=True)
os.makedirs(sdFolder)
else:
os.makedirs(sdFolder)
if analysis['errors'] == {}:
# Stage the service
arcpy.StageService_server(sddraft, sd)
res['servicedef'] = sd
return res
else:
# If the sddraft analysis contained errors, display them and quit.
print (analysis['errors'])
return None
def _modify_sddraft(sddraft,
capabilities,
maxRecordCount='1000',
url='http://www.arcgis.com'):
""" modifies the sddraft for agol publishing
"""
if arcpyFound == False:
return
doc = ET.parse(sddraft)
root_elem = doc.getroot()
if root_elem.tag != "SVCManifest":
raise ValueError("Root tag is incorrect. Is {} a .sddraft file?".format(sddraft))
# The following 6 code pieces modify the SDDraft from a new MapService
# with caching capabilities to a FeatureService with Query,Create,
# Update,Delete,Uploads,Editing capabilities as well as the ability to set the max
# records on the service.
# The first two lines (commented out) are no longer necessary as the FS
# is now being deleted and re-published, not truly overwritten as is the
# case when publishing from Desktop.
# The last three pieces change Map to Feature Service, disable caching
# and set appropriate capabilities. You can customize the capabilities by
# removing items.
# Note you cannot disable Query from a Feature Service.
# Change service type from map service to feature service
for desc in doc.findall('Type'):
if desc.text == "esriServiceDefinitionType_New":
desc.text = 'esriServiceDefinitionType_Replacement'
for config in doc.findall("./Configurations/SVCConfiguration/TypeName"):
if config.text == "MapServer":
config.text = "FeatureServer"
#Turn off caching
for prop in doc.findall("./Configurations/SVCConfiguration/Definition/" +
"ConfigurationProperties/PropertyArray/" +
"PropertySetProperty"):
if prop.find("Key").text == 'isCached':
prop.find("Value").text = "false"
if prop.find("Key").text == 'maxRecordCount':
prop.find("Value").text = maxRecordCount
for prop in doc.findall("./Configurations/SVCConfiguration/Definition/Extensions/SVCExtension"):
if prop.find("TypeName").text == 'KmlServer':
prop.find("Enabled").text = "false"
# Turn on feature access capabilities
for prop in doc.findall("./Configurations/SVCConfiguration/Definition/Info/PropertyArray/PropertySetProperty"):
if prop.find("Key").text == 'WebCapabilities':
prop.find("Value").text = capabilities
# Update url for portal
for prop in doc.findall("./StagingSettings/PropertyArray/PropertySetProperty"):
if prop.find("Key").text == 'ServerConnectionString':
prop.find("Value").text = prop.find("Value").text.toString().replace('www.arcgis.com',url)
# Update url for portal
for prop in doc.findall("./itemInfo/url"):
        prop.text = prop.text.replace('www.arcgis.com', url)
# Add the namespaces which get stripped, back into the .SD
root_elem.attrib["xmlns:typens"] = 'http://www.esri.com/schemas/ArcGIS/10.2'
root_elem.attrib["xmlns:xs"] = 'http://www.w3.org/2001/XMLSchema'
newSDdraft = os.path.dirname(sddraft) + os.sep + "draft_mod.sddraft"
# Write the new draft to disk
with open(newSDdraft, 'w') as f:
doc.write(f, 'utf-8')
del doc
return newSDdraft
#----------------------------------------------------------------------
def _prep_mxd(mxd):
    """ ensures the required mxd properties are set to something """
    if not arcpyFound:
        return
changed = False
if mxd.author.strip() == "":
mxd.author = "NA"
changed = True
if mxd.credits.strip() == "":
mxd.credits = "NA"
changed = True
if mxd.description.strip() == "":
mxd.description = "NA"
changed = True
if mxd.summary.strip() == "":
mxd.summary = "NA"
changed = True
if mxd.tags.strip() == "":
mxd.tags = "NA"
changed = True
if mxd.title.strip() == "":
mxd.title = "NA"
changed = True
if changed == True:
mxd.save()
return mxd
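#----------------------------------------------------------------------
# Minimal usage sketch (paths and names are hypothetical). The returned dict
# contains 'service_name', 'tags', 'description' and the staged 'servicedef'
# (*.sd) path on success, or None if the SDDraft analysis reported errors.
#
#   res = MXDtoFeatureServiceDef(mxd_path=r"C:\maps\parcels.mxd",
#                                service_name="Parcels",
#                                folder_name="LandRecords")
#   if res is not None:
#       print(res['servicedef'])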
|
|
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementing caching mechanisms for MySQL Fabric"""
from datetime import datetime, timedelta
from hashlib import sha1
import logging
import threading
from . import FabricShard
_LOGGER = logging.getLogger('myconnpy-fabric')
_CACHE_TTL = 1 * 60 # 1 minute
class CacheEntry(object):
"""Base class for MySQL Fabric cache entries"""
def __init__(self, version=None, fabric_uuid=None, ttl=_CACHE_TTL):
self.version = version
self.fabric_uuid = fabric_uuid
self.last_updated = datetime.utcnow()
self._ttl = ttl
@classmethod
def hash_index(cls, part1, part2=None):
"""Create hash for indexing"""
raise NotImplementedError
@property
def invalid(self):
"""Returns True if entry is not valid any longer
This property returns True when the entry is not valid any longer.
        The entry becomes invalid when now > (last_updated + ttl), where ttl
        is in seconds.
"""
if not self.last_updated:
return False
atime = self.last_updated + timedelta(seconds=self._ttl)
return datetime.utcnow() > atime
def reset_ttl(self):
"""Reset the Time to Live"""
self.last_updated = datetime.utcnow()
def invalidate(self):
"""Invalidates the cache entry"""
self.last_updated = None
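# TTL sketch (hypothetical timings): with the default _CACHE_TTL of 60 seconds,
#
#   entry = CacheEntry(ttl=60)   # last_updated is set to utcnow()
#   entry.invalid                # False for the first 60 seconds
#   # ... 61 seconds later ...
#   entry.invalid                # True; FabricCache lookups below then evict
#                                # the entry and return None so fresh data is fetched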
class CacheShardTable(CacheEntry):
"""Cache entry for a Fabric sharded table"""
def __init__(self, shard, version=None, fabric_uuid=None):
if not isinstance(shard, FabricShard):
raise ValueError("shard argument must be a FabricShard instance")
super(CacheShardTable, self).__init__(version=version,
fabric_uuid=fabric_uuid)
self.partitioning = {}
self._shard = shard
if shard.key and shard.group:
self.add_partition(shard.key, shard.group)
def __getattr__(self, attr):
return getattr(self._shard, attr)
def add_partition(self, key, group):
"""Add sharding information for a group"""
if self.shard_type == 'RANGE':
key = int(key)
elif self.shard_type == 'RANGE_DATETIME':
try:
if ':' in key:
key = datetime.strptime(key, "%Y-%m-%d %H:%M:%S")
else:
key = datetime.strptime(key, "%Y-%m-%d").date()
except:
raise ValueError(
"RANGE_DATETIME key could not be parsed, was: {0}".format(
key
))
elif self.shard_type == 'RANGE_STRING':
pass
else:
raise ValueError("Unsupported sharding type {0}".format(
self.shard_type
))
self.partitioning[key] = {
'group': group,
}
self.reset_ttl()
@classmethod
def hash_index(cls, part1, part2=None):
"""Create hash for indexing"""
return sha1(part1.encode('utf-8') + part2.encode('utf-8')).hexdigest()
def __repr__(self):
return "{class_}({database}.{table}.{column})".format(
class_=self.__class__,
database=self.database,
table=self.table,
column=self.column
)
class CacheGroup(CacheEntry):
"""Cache entry for a Fabric group"""
def __init__(self, group_name, servers):
super(CacheGroup, self).__init__(version=None, fabric_uuid=None)
self.group_name = group_name
self.servers = servers
@classmethod
def hash_index(cls, part1, part2=None):
"""Create hash for indexing"""
return sha1(part1.encode('utf-8')).hexdigest()
def __repr__(self):
return "{class_}({group})".format(
class_=self.__class__,
group=self.group_name,
)
class FabricCache(object):
"""Singleton class for caching Fabric data
    Only one instance of this class can exist globally.
"""
def __init__(self, ttl=_CACHE_TTL):
self._ttl = ttl
self._sharding = {}
self._groups = {}
self.__sharding_lock = threading.Lock()
self.__groups_lock = threading.Lock()
def remove_group(self, entry_hash):
"""Remove cache entry for group"""
with self.__groups_lock:
try:
del self._groups[entry_hash]
except KeyError:
# not cached, that's OK
pass
else:
_LOGGER.debug("Group removed from cache")
def remove_shardtable(self, entry_hash):
"""Remove cache entry for shard"""
with self.__sharding_lock:
try:
del self._sharding[entry_hash]
except KeyError:
# not cached, that's OK
pass
def sharding_cache_table(self, shard, version=None, fabric_uuid=None):
"""Cache information about a shard"""
entry_hash = CacheShardTable.hash_index(shard.database, shard.table)
with self.__sharding_lock:
try:
entry = self._sharding[entry_hash]
entry.add_partition(shard.key, shard.group)
except KeyError:
# New cache entry
entry = CacheShardTable(shard, version=version,
fabric_uuid=fabric_uuid)
self._sharding[entry_hash] = entry
def cache_group(self, group_name, servers):
"""Cache information about a group"""
entry_hash = CacheGroup.hash_index(group_name)
with self.__groups_lock:
try:
entry = self._groups[entry_hash]
entry.servers = servers
entry.reset_ttl()
_LOGGER.debug("Recaching group {0} with {1}".format(
group_name, servers))
except KeyError:
# New cache entry
entry = CacheGroup(group_name, servers)
self._groups[entry_hash] = entry
_LOGGER.debug("Caching group {0} with {1}".format(
group_name, servers))
def sharding_search(self, database, table):
"""Search cache for a shard based on database and table"""
entry_hash = CacheShardTable.hash_index(database, table)
entry = None
try:
entry = self._sharding[entry_hash]
if entry.invalid:
_LOGGER.debug("{entry} invalidated".format(entry))
self.remove_shardtable(entry_hash)
return None
except KeyError:
# Nothing in cache
return None
return entry
def group_search(self, group_name):
"""Search cache for a group based on its name"""
entry_hash = CacheGroup.hash_index(group_name)
entry = None
try:
entry = self._groups[entry_hash]
if entry.invalid:
_LOGGER.debug("{entry} invalidated".format(entry))
self.remove_group(entry_hash)
return None
except KeyError:
# Nothing in cache
return None
return entry
def __repr__(self):
return "{class_}(groups={nrgroups},shards={nrshards})".format(
class_=self.__class__,
nrgroups=len(self._groups),
nrshards=len(self._sharding)
)
|
|
import logging
from datetime import datetime, timedelta
from nose.tools import eq_, ok_
from pyquery import PyQuery
from django.contrib.auth.models import Group
from fjord.base.tests import LocalizingClient, ProfileFactory, reverse
from fjord.feedback.tests import ResponseFactory, ResponseEmailFactory
from fjord.search.tests import ElasticTestCase
logger = logging.getLogger(__name__)
class TestAnalyticsDashboardView(ElasticTestCase):
client_class = LocalizingClient
def test_permissions(self):
# Verifies that only analyzers can see the analytics dashboard
# link
resp = self.client.get(reverse('dashboard'))
eq_(200, resp.status_code)
assert 'adashboard' not in resp.content
# Verifies that only analyzers can see the analytics dashboard
resp = self.client.get(reverse('analytics_dashboard'))
eq_(403, resp.status_code)
# Verify analyzers can see analytics dashboard link
jane = ProfileFactory(user__email='[email protected]').user
jane.groups.add(Group.objects.get(name='analyzers'))
self.client_login_user(jane)
resp = self.client.get(reverse('dashboard'))
eq_(200, resp.status_code)
assert 'adashboard' in resp.content
# Verify analyzers can see analytics dashboard
resp = self.client.get(reverse('analytics_dashboard'))
eq_(200, resp.status_code)
class TestOccurrencesView(ElasticTestCase):
client_class = LocalizingClient
def setUp(self):
super(TestOccurrencesView, self).setUp()
# Set up some sample data
items = [
# happy, locale, description
(True, 'en-US', 'apple banana orange pear'),
(True, 'en-US', 'orange pear kiwi'),
(True, 'en-US', 'chocolate chocolate yum'),
(False, 'en-US', 'apple banana grapefruit'),
            # This one doesn't create bigrams because there aren't enough words
(False, 'en-US', 'orange'),
# This one shouldn't show up
(False, 'es', 'apple banana'),
]
for happy, locale, description in items:
ResponseFactory(happy=happy, locale=locale,
description=description)
self.refresh()
# Create analyzer and log analyzer in
jane = ProfileFactory(user__email='[email protected]').user
jane.groups.add(Group.objects.get(name='analyzers'))
self.client_login_user(jane)
def test_occurrences(self):
url = reverse('analytics_occurrences')
# No results when you initially look at the page
resp = self.client.get(url)
eq_(200, resp.status_code)
assert 'id="results"' not in resp.content
# 'product' is a required field
resp = self.client.get(url, {'product': ''})
eq_(200, resp.status_code)
# FIXME - this test is too loose
assert 'This field is required' in resp.content
# At least a version, search term or start date is required
resp = self.client.get(url, {'product': 'Firefox'})
eq_(200, resp.status_code)
assert 'This field is required' not in resp.content
assert 'Must specify at least one' in resp.content
# Minimal required for results
resp = self.client.get(url, {
'product': 'Firefox',
'first_version': '17.0'}
)
eq_(200, resp.status_code)
assert 'This field is required' not in resp.content
        assert 'Must specify at least one' not in resp.content
assert 'id="results"' in resp.content
# FIXME - when things are less prototypy, add tests for
# specific results
class TestSearchView(ElasticTestCase):
client_class = LocalizingClient
url = reverse('analytics_search')
    # Note: We count the number of li.opinion elements since there's
    # one opinion-classed li element for every feedback response
    # that shows up in the search results.
def setUp(self):
super(TestSearchView, self).setUp()
# Set up some sample data
# 4 happy, 3 sad.
# 2 Windows XP, 2 Linux, 1 OS X, 2 Windows 7
now = datetime.now()
# The dashboard by default shows the last week of data, so
# these need to be relative to today. The alternative is that
# every test gives an explicit date range, and that is
# annoying and verbose.
items = [
# happy, platform, locale, description, created
(True, '', 'en-US', 'apple', now - timedelta(days=6)),
(True, 'Windows 7', 'es', 'banana', now - timedelta(days=5)),
(True, 'Linux', 'en-US', 'orange', now - timedelta(days=4)),
(True, 'Linux', 'en-US', 'apple', now - timedelta(days=3)),
(False, 'Windows XP', 'en-US', 'banana', now - timedelta(days=2)),
(False, 'Windows 7', 'en-US', 'orange', now - timedelta(days=1)),
(False, 'Linux', 'es', u'\u2713 apple', now - timedelta(days=0)),
]
for happy, platform, locale, description, created in items:
# We don't need to keep this around, just need to create it.
ResponseFactory(happy=happy, platform=platform, locale=locale,
description=description, created=created)
self.refresh()
# Create analyzer and log analyzer in
jane = ProfileFactory(user__email='[email protected]').user
jane.groups.add(Group.objects.get(name='analyzers'))
self.client_login_user(jane)
def test_front_page(self):
r = self.client.get(self.url)
eq_(200, r.status_code)
self.assertTemplateUsed(r, 'analytics/analyzer/search.html')
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
def test_search(self):
# Happy
r = self.client.get(self.url, {'happy': 1})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 4)
# Sad
r = self.client.get(self.url, {'happy': 0})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 3)
# Locale
r = self.client.get(self.url, {'locale': 'es'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 2)
# Platform and happy
r = self.client.get(self.url, {'happy': 1, 'platform': 'Linux'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 2)
# Product
r = self.client.get(self.url, {'product': 'Firefox'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
# Product
r = self.client.get(self.url, {'product': 'Firefox for Android'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
# Product version
r = self.client.get(
self.url, {'product': 'Firefox', 'version': '17.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
# Product version
r = self.client.get(
self.url, {'product': 'Firefox', 'version': '18.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
# Empty search
r = self.client.get(self.url, {'platform': 'Atari'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
def test_has_email(self):
# Test before we create a responsemail
r = self.client.get(self.url, {'has_email': '0'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
r = self.client.get(self.url, {'has_email': '1'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
ResponseEmailFactory(
opinion__happy=True,
opinion__product=u'Firefox',
opinion__description=u'ou812',
opinion__created=datetime.now())
# Have to reindex everything because unlike in a request
# context, what happens here is we index the Response, but
# without the ResponseEmail.
self.setup_indexes()
r = self.client.get(self.url, {'has_email': '0'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
ok_('ou812' not in r.content)
eq_(len(pq('li.opinion')), 7)
r = self.client.get(self.url, {'has_email': '1'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
ok_('ou812' in r.content)
eq_(len(pq('li.opinion')), 1)
def test_country(self):
ResponseEmailFactory(
opinion__happy=True,
opinion__product=u'Firefox OS',
opinion__description=u'ou812',
opinion__country=u'ES',
opinion__created=datetime.now())
# Have to reindex everything because unlike in a request
# context, what happens here is we index the Response, but
# without the ResponseEmail.
self.setup_indexes()
r = self.client.get(self.url, {
'product': 'Firefox OS', 'country': 'ES'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
ok_('ou812' in r.content)
eq_(len(pq('li.opinion')), 1)
def test_empty_and_unknown(self):
# Empty value should work
r = self.client.get(self.url, {'platform': ''})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 1)
# "Unknown" value should also work
r = self.client.get(self.url, {'platform': 'Unknown'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 1)
def test_version_noop(self):
"""version has no effect if product isn't set"""
# Filter on product and version--both filters affect the
# results
r = self.client.get(
self.url, {'product': 'Firefox', 'version': '18.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 0)
# Filter on version--filter has no effect on results
r = self.client.get(
self.url, {'version': '18.0'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
def test_text_search(self):
# Text search
r = self.client.get(self.url, {'q': 'apple'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 3)
# Text and filter
r = self.client.get(
self.url, {'q': 'apple', 'happy': 1, 'locale': 'en-US'})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 2)
def test_text_search_unicode(self):
"""Unicode in the search field shouldn't kick up errors"""
# Text search
r = self.client.get(self.url, {'q': u'\u2713'})
eq_(r.status_code, 200)
def test_date_search(self):
# These start and end dates will give known slices of the data.
# Silly relative dates.
        start = (datetime.now() - timedelta(days=5)).strftime('%Y-%m-%d')
        end = (datetime.now() - timedelta(days=2)).strftime('%Y-%m-%d')
# Unspecified start => (-infin, end]
r = self.client.get(self.url, {
'date_end': end,
})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 5)
# Unspecified end => [start, +infin)
r = self.client.get(self.url, {
'date_start': start
})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 6)
# Both start and end => [start, end]
r = self.client.get(self.url, {
'date_start': start,
'date_end': end,
})
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 4)
def test_date_start_valueerror(self):
# https://bugzilla.mozilla.org/show_bug.cgi?id=898584
r = self.client.get(self.url, {
'date_start': '0001-01-01',
})
eq_(r.status_code, 200)
def test_invalid_search(self):
# Invalid values for happy shouldn't filter
r = self.client.get(self.url, {'happy': 'fish'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
# Unknown parameters should be ignored.
r = self.client.get(self.url, {'apples': 'oranges'})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
# A broken date range search shouldn't affect anything
# Why this? Because this is the thing the fuzzer found.
r = self.client.get(self.url, {
'date_end': '/etc/shadow\x00',
'date_start': '/etc/passwd\x00'
})
eq_(r.status_code, 200)
pq = PyQuery(r.content)
eq_(len(pq('li.opinion')), 7)
def test_search_export_csv(self):
r = self.client.get(self.url, {'format': 'csv'})
eq_(r.status_code, 200)
# Check that it parses in csv with n rows.
lines = r.content.splitlines()
# URL row, params row, header row and one row for every opinion
eq_(len(lines), 10)
|
|
#!/usr/bin/python
from scanner import Scanner
import AST
class Cparser(object):
def __init__(self):
self.scanner = Scanner()
self.scanner.build()
self.errors = False
tokens = Scanner.tokens
precedence = (
("nonassoc", 'IFX'),
("nonassoc", 'ELSE'),
("right", '='),
("left", 'OR'),
("left", 'AND'),
("left", '|'),
("left", '^'),
("left", '&'),
("nonassoc", '<', '>', 'EQ', 'NEQ', 'LE', 'GE'),
("left", 'SHL', 'SHR'),
("left", '+', '-'),
("left", '*', '/', '%'),
)
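    # PLY precedence table: entries are listed from lowest to highest priority.
    # The fictitious IFX token (paired with the %prec IFX markers in
    # p_choice_instr below) resolves the classic dangling-else ambiguity in
    # favour of binding the ELSE to the nearest IF.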
def p_error(self, p):
self.errors = True
err_format = "Syntax error at line {0}, column {1}: LexToken({2}, '{3}')"
if p:
print(err_format.format(p.lineno, self.scanner.find_tok_column(p), p.type, p.value))
else:
print('At end of input')
def p_program(self, p):
"""program : declarations fundefs instructions"""
# ^ ^ ^ ^
# p[0] p[1] p[2] p[3]
program = AST.Program(p[1], p[2], p[3])
p[0] = program
def p_declarations(self, p):
"""declarations : declarations declaration
| """
if len(p) == 3: # occurs when declarations -> declarations declaration
p[1].declarations.append(p[2])
p[0] = p[1]
else: # occurs when declarations -> epsilon
p[0] = AST.Declarations()
def p_declaration(self, p):
"""declaration : TYPE inits ';'
| error ';' """
if len(p) == 3: # occurs when error
p[0] = p[1]
else:
p[0] = AST.Declaration(p[1], p[2])
def p_inits(self, p):
"""inits : inits ',' init
| init """
if len(p) == 4: # occurs when inits -> inits, init
p[0] = p[1]
p[0].inits.append(p[3])
else: # occurs when inits -> init
p[0] = AST.Inits()
p[0].inits.append(p[1])
def p_init(self, p):
"""init : ID '=' expression """
p[0] = AST.Init(p[1], p[3], p.lineno(1))
def p_instructions(self, p):
"""instructions : instructions instruction
| instruction """
if len(p) == 3: # occurs when instructions -> instructions instruction
p[1].instructions.append(p[2])
p[0] = p[1]
else: # occurs when instructions -> instruction
p[0] = AST.Instructions()
p[0].instructions.append(p[1])
def p_instruction(self, p):
"""instruction : print_instr
| labeled_instr
| assignment
| choice_instr
| while_instr
| repeat_instr
| return_instr
| break_instr
| continue_instr
| compound_instr"""
p[0] = p[1]
def p_print_instr(self, p):
"""print_instr : PRINT expression ';'
| PRINT error ';' """
p[0] = AST.PrintInstr(p[2], p.lineno(1))
def p_labeled_instr(self, p):
"""labeled_instr : ID ':' instruction """
p[0] = AST.LabeledInstruction(p[1], p[3], p.lineno(1))
def p_assignment(self, p):
"""assignment : ID '=' expression ';' """
p[0] = AST.Assignment(p[1], p[3], p.lineno(1))
def p_choice_instr(self, p):
"""choice_instr : IF '(' condition ')' instruction %prec IFX
| IF '(' condition ')' instruction ELSE instruction
| IF '(' error ')' instruction %prec IFX
| IF '(' error ')' instruction ELSE instruction """
if len(p) == 8:
p[0] = AST.IfElseInstr(p[3], p[5], p[7])
else:
p[0] = AST.IfInstr(p[3], p[5])
def p_while_instr(self, p):
"""while_instr : WHILE '(' condition ')' instruction
| WHILE '(' error ')' instruction """
p[0] = AST.WhileInstr(p[3], p[5])
def p_repeat_instr(self, p):
"""repeat_instr : REPEAT instructions UNTIL condition ';' """
p[0] = AST.RepeatInstr(p[4], p[2])
def p_return_instr(self, p):
"""return_instr : RETURN expression ';' """
p[0] = AST.ReturnInstr(p[2], p.lineno(1))
def p_continue_instr(self, p):
"""continue_instr : CONTINUE ';' """
p[0] = AST.ContinueInstr(p.lineno(1))
def p_break_instr(self, p):
"""break_instr : BREAK ';' """
p[0] = AST.BreakInstr(p.lineno(1))
def p_compound_instr(self, p):
"""compound_instr : '{' declarations instructions '}' """
p[0] = AST.CompoundInstr(p[2], p[3])
def p_condition(self, p):
"""condition : expression"""
p[0] = p[1]
def p_const(self, p):
"""const : INTEGER
| FLOAT
| STRING"""
lineno = p.lineno(1)
try:
int(p[1])
p[0] = AST.Integer(p[1], lineno)
except ValueError:
try:
float(p[1])
p[0] = AST.Float(p[1], lineno)
except ValueError:
p[0] = AST.String(p[1], lineno)
def p_id_expr(self, p):
"""expression : ID"""
p[0] = AST.Variable(p[1], p.lineno(1))
def p_const_expr(self, p):
"""expression : const"""
p[0] = p[1]
def p_paren_expression(self, p):
"""expression : '(' expression ')'
| '(' error ')'"""
p[0] = AST.ParenExpr(p[2])
def p_funcall(self, p):
"""expression : ID '(' expr_list_or_empty ')'
| ID '(' error ')' """
p[0] = AST.Funcall(p[1], p[3], p.lineno(1))
def p_bin_expression(self, p):
"""expression : expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression
| expression '%' expression
| expression '|' expression
| expression '&' expression
| expression '^' expression
| expression AND expression
| expression OR expression
| expression SHL expression
| expression SHR expression
| expression EQ expression
| expression NEQ expression
| expression '>' expression
| expression '<' expression
| expression LE expression
| expression GE expression"""
        p[0] = AST.BinExpr(p[2], p[1], p[3], p.lineno(2))  # operator comes first
def p_expr_list_or_empty(self, p):
"""expr_list_or_empty : expr_list
| """
p[0] = None if len(p) == 1 else p[1]
def p_expr_list(self, p):
"""expr_list : expr_list ',' expression
| expression """
p[0] = AST.ExprList()
if len(p) == 4:
p[0].cons_expr(p[1].expr_list, p[3])
else:
p[0].append_expr(p[1])
def p_fundefs(self, p):
"""fundefs : fundef fundefs
| """
p[0] = AST.FundefList()
if len(p) == 3:
p[0].cons_fun(p[2].fundef_list, p[1])
elif len(p) == 2:
p[0].append_fun(p[1])
def p_fundef(self, p):
"""fundef : TYPE ID '(' args_list_or_empty ')' compound_instr """
p[0] = AST.Fundef(p[1], p[2], p[4], p[6], p.lineno(1))
def p_args_list_or_empty(self, p):
"""args_list_or_empty : args_list
| """
if len(p) == 2:
p[0] = p[1]
else:
p[0] = AST.ArgList() # empty
def p_args_list(self, p):
"""args_list : args_list ',' arg
| arg """
p[0] = AST.ArgList()
if len(p) == 4:
p[0].cons_arg(p[1].arg_list, p[3])
else:
p[0].append_arg(p[1])
def p_arg(self, p):
"""arg : TYPE ID """
p[0] = AST.Arg(p[1], p[2], p.lineno(1))
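# Illustrative driver (a sketch, not part of the original grammar file): shows
# how this class is commonly wired up with PLY. It assumes the standard
# ply.yacc entry points and that Scanner exposes its built PLY lexer as
# `scanner.lexer`; both are assumptions about the accompanying modules, not
# guarantees.
if __name__ == '__main__':
    import ply.yacc as yacc
    cparser = Cparser()
    # yacc.yacc collects the p_* rules, tokens and precedence defined above.
    parser = yacc.yacc(module=cparser)
    source = "int x = 1;"  # hypothetical input program
    ast_root = parser.parse(source, lexer=cparser.scanner.lexer)
    print(ast_root)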
|
|
import sys
import gym
import gym.wrappers
import threading
import time
import numpy as np
import logging
from atari_environment import AtariEnvironment
from collections import OrderedDict
from blocks import serialization
import network as A3C
# FIXME: have to increase depth limit slightly for A3C-LSTM agent
sys.setrecursionlimit(50000)
# Disabling gym logger
gym.undo_logger_setup()
def sample_policy_action(num_actions, probs, rng):
""" Sample an action using a prob. distribution
Parameters
----------
num_actions : int
number of available actions
    probs : list
        list of floats with the probability of each action
    rng : numpy.random
        random state used to draw an action from the multinomial distribution
    """
probs = probs - np.finfo(np.float32).epsneg
histogram = rng.multinomial(1, probs)
action_index = int(np.nonzero(histogram)[0])
return action_index
def sample_argmax_action(num_actions, probs, rng):
""" Pick the argmax action
Parameters
----------
num_actions : int
number of available actions
probs : list
list of float with the probability of each action
"""
action_index = int(np.argmax(probs))
return action_index
class Common_Model(object):
""" A container class for the shared model
Parameters
----------
rng : numpy.random
game : string
gym name of the game
model: instance of :class:`.Model`
the shared model with the cost application
algorithm: instance of :class: `theano.function`
gradient of the cost function of the model
    policy_network: instance of :class:`theano.function`
the policy function of the shared model
value_network: instance of :class:`theano.function`
the value function of the shared model
monitor_env: instance of class `gym.environment`
a gym environment
resized_width: int
the width of the images that will be used by the agent
resized_height: int
the height of the images that will be used by the agent
agent_history_length: int
number of past frames that will be used by the agent
max_steps: int
maximum number of steps the agent will play during training
render_flag: bool
True to render the screen whilst playing
results_file: str
prefix path for storing the results
num_steps_eval: int
maximum number of steps the agent will play during evaluation
sample_argmax: bool
True if the action of max prob should be chosen or False to choose
a multinomial sampling strategy
"""
def __init__(self, rng, game, model, algorithm, policy_network,
value_network, monitor_env,
resized_width, resized_height, agent_history_length,
max_steps, render_flag=False, results_file=False,
num_steps_eval=100, sample_argmax=False):
""" The common model """
self.rng = rng
self.game = game
self.model = model
self.curr_steps = 0
self.algorithm = algorithm
self.policy_network = policy_network
self.value_network = value_network
self.monitor_env = monitor_env
self.resized_width = resized_width
self.resized_height = resized_height
self.agent_history_length = agent_history_length
self.env = AtariEnvironment(
gym_env=self.monitor_env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length)
self.num_steps_eval = num_steps_eval
self.max_steps = max_steps
self.render_flag = render_flag
self.results_file = results_file
# Sets the sampling strategy
if sample_argmax:
self._sample_function = sample_argmax_action
else:
self._sample_function = sample_policy_action
self.curr_it = 0
def reset_environment(self):
""" Reloads an environment
"""
self.env = AtariEnvironment(
gym_env=self.monitor_env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length)
def reset_internal_state(self):
""" Reset internal sttate
Doing nothing """
pass
def obtain_policy(self, s_t):
""" Obtain the probabilities of each action
Parameters
----------
s_t :list of states
"""
probs = self.policy_network(s_t)[0]
return probs
def obtain_value(self, s_t):
return self.value_network(s_t)
class Common_Model_LSTM(Common_Model):
"""
Parameters
----------
rng : numpy.random
game : string
gym name of the game
model: instance of :class:`.Model`
the shared model with the cost application
algorithm: instance of :class: `theano.function`
gradient of the cost function of the model
    policy_network: instance of :class:`theano.function`
the policy function of the shared model
value_network: instance of :class:`theano.function`
the value function of the shared model
monitor_env: instance of class `gym.environment`
a gym environment
resized_width: int
the width of the images that will be used by the agent
resized_height: int
the height of the images that will be used by the agent
agent_history_length: int
number of past frames that will be used by the agent
max_steps: int
maximum number of steps the agent will play during training
render_flag: bool
True to render the screen whilst playing
results_file: str
prefix path for storing the results
num_steps_eval: int
maximum number of steps the agent will play during evaluation
sample_argmax: bool
True if the action of max prob should be chosen or False to choose
a multinomial sampling strategy
lstm_output_units: int
dimension of lstm output units (to reset internal state)
"""
def __init__(self, rng, game, model, algorithm, policy_network,
value_network, monitor_env,
resized_width, resized_height, agent_history_length,
max_steps, render_flag=False, results_file=False,
num_steps_eval=100, sample_argmax=False,
lstm_output_units=256):
self.lstm_output_units = lstm_output_units
super(Common_Model_LSTM, self).__init__(
rng, game, model, algorithm,
policy_network,
value_network, monitor_env,
resized_width, resized_height, agent_history_length,
max_steps, render_flag, results_file,
num_steps_eval, sample_argmax)
def reset_internal_state(self):
""" Resets internal state: state and LSTM cell """
self.state = np.zeros((1, self.lstm_output_units), dtype="float32")
self.cells = np.zeros((1, self.lstm_output_units), dtype="float32")
def obtain_policy(self, s_t):
""" Obtain the probabilities of each action
Parameters
----------
s_t: list of states
"""
probs, self.state, self.cells = self.policy_network(self.cells, s_t,
self.state)
return probs
def obtain_value(self, s_t):
""" TODO """
value = self.value_network(self.cells, s_t, self.state)
return value
class Common_Model_Wrapper(object):
""" A wrapper class to store the model shared by all the learning agents
It implements the lock for concurrent thread access to the shared model
Parameters
----------
rng : numpy.random
game : string
gym name of the game
model: instance of :class:`.Model`
the shared model with the cost application
algorithm: instance of :class: `theano.function`
gradient of the cost function of the model
    policy_network: instance of :class:`theano.function`
the policy function of the shared model
value_network: instance of :class:`theano.function`
the value function of the shared model
monitor_env: instance of class `gym.environment`
a gym environment
resized_width: int
the width of the images that will be used by the agent
resized_height: int
the height of the images that will be used by the agent
agent_history_length: int
number of past frames that will be used by the agent
max_steps: int
maximum number of steps the agent will play during training
render_flag: bool
True to render the screen whilst playing
results_file: str
prefix path for storing the results
num_steps_eval: int
maximum number of steps the agent will play during evaluation
sample_argmax: bool
True if the action of max prob should be chosen or False to choose
a multinomial sampling strategy
"""
def __init__(self, rng, game, model, algorithm, policy_network,
value_network, monitor_env, resized_width, resized_height,
agent_history_length, max_steps, render_flag=False,
results_file=False, num_steps_eval=100,
sample_argmax=False, **kwargs):
if not kwargs:
self.common_model = Common_Model(rng, game, model, algorithm,
policy_network,
value_network, monitor_env,
resized_width,
resized_height,
agent_history_length,
max_steps, render_flag,
results_file,
num_steps_eval, sample_argmax)
else:
self.common_model = Common_Model_LSTM(
rng, game, model,
algorithm,
policy_network,
value_network,
monitor_env,
resized_width,
resized_height,
agent_history_length,
max_steps, render_flag,
results_file,
num_steps_eval,
sample_argmax,
kwargs['lstm_output_units'])
self.lock = threading.Lock()
def save_model(self, save_file):
""" Dump the current shared model to a file
Parameters
----------
save_file: str
path in which to save the model
"""
self.lock.acquire()
with open(save_file, "wb+") as dst:
serialization.dump(self.common_model, dst,
parameters=self.common_model.model.parameters)
self.lock.release()
def load_model(self, load_file):
""" Loading model parameters
Parameters
----------
load_file: str
path to a checkpoint of a model
"""
self.lock.acquire()
with open(load_file, 'rb') as src:
parameters = serialization.load_parameters(src)
self.common_model.model.set_parameter_values(
parameters.get_values())
self.lock.release()
def update_cum_gradients(self, init_state, last_state, batch_size=1,
stats_flag=False):
""" Obtain the cum gradient of the iteration and updates the model
Parameters
----------
init_state : OrderedDict
parameters of the model at the beginning of an iteration
last_state : OrderedDict
parameters of the model at the end on an iteration
batch_size: int
size of the current batch
stats_flag: bool
whether to show current update stats on screen
"""
# Obtain the acc gradient information
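        # The accumulated "gradient" is approximated as the difference between the
        # parameters at the start and at the end of the worker's rollout, scaled by
        # the batch size; the shared optimizer then applies this delta to the
        # shared model under the lock below.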
new_update = OrderedDict()
for kk in init_state:
if kk in last_state:
new_update[kk] = (init_state[kk] - last_state[kk]) * batch_size
else:
logging.error("{} is not part of the update ".format(kk))
try:
# Acquiring the lock and perform update
with self.lock:
self.common_model.algorithm.process_batch(new_update)
if (stats_flag):
for kk in self.common_model.model.get_parameter_dict():
logging.info(
("After Update {} with Mean {} Current {}" +
" Shape {}").format(
kk,
np.mean(new_update[kk]),
np.mean(
self.common_model.model.get_parameter_dict()[
kk].get_value()),
np.shape(
self.common_model.model.get_parameter_dict()[
kk].get_value())))
finally:
pass
def synchronize_model(self, agent_model):
""" Update the parameters of the learning agent with current
shared model parameters
Parameters
----------
agent_model : instance of :class: `~blocks.model.Model`
"""
agent_model.set_parameter_values(
self.common_model.model.get_parameter_values())
def perform_evaluation(self, num_reps, experiment_name,
eval_it, do_gym_eval=False):
""" Perform an evaluation of the current model
Parameters
----------
num_reps: int
number of times the test experiment will be repeated
experiment_name: str
name of the experiment
eval_it : int
number of evaluation iterations
do_gym_eval: bool
if gym statistics should be gathered during evaluation
"""
# Capturing the lock during all the evaluation period
self.common_model.reset_environment()
try:
self.lock.acquire()
print "EVAL: GOING TO EVAL "
if do_gym_eval:
self.common_model.monitor_env.monitor.start(
"{}{}_{}/eval".format(
self.common_model.results_file,
experiment_name,
eval_it))
# Stat vars
test_rewards = []
test_ep_length = []
test_value = []
# Reset internals
print "GOING TO RESET INTERNAL"
self.common_model.reset_internal_state()
for i_episode in xrange(num_reps):
print "GETTTING INITIAL STATE"
s_t = self.common_model.env.get_initial_state()
terminal = False
ep_reward = 0
ep_t = 0
# Execute actions till achieving a finish state or the eval
# period is over
while not terminal and ep_t < self.common_model.num_steps_eval:
if self.common_model.render_flag and do_gym_eval:
self.common_model.monitor_env.render()
probs = self.common_model.obtain_policy([s_t])[0]
action_index = self.common_model._sample_function(len(
self.common_model.env.gym_actions), probs,
self.common_model.rng)
s_t1, r_t, terminal, info = self.common_model.env.step(
action_index)
s_t = s_t1
ep_reward += r_t
ep_t += 1
test_value.append(
self.common_model.obtain_value([s_t])[0][0])
test_rewards.append(ep_reward)
test_ep_length.append(ep_t)
# TODO: save stats to file
self._save_eval_to_file("{}{}".format(
self.common_model.results_file,
self.common_model.game),
eval_it,
test_rewards,
test_ep_length,
test_value)
if do_gym_eval:
self.common_model.monitor_env.close()
except ValueError:
print "Exception whilst evaluating ", eval_it
finally:
self.lock.release()
def _save_eval_to_file(self, eval_file, eval_it,
test_rewards,
test_ep_length,
test_value):
""" Save stats to a file
This function was intended for use at several timesteps
during training.
Parameters
----------
eval_file: str
path in which the stats will be appended
eval_it : int
iteration in which the eval is performed
test_rewards: list of int
            rewards obtained in the evaluation
test_ep_length: list of int
length of each episode
test_value: list of float
value at each point of an experiment
"""
with open(eval_file, "a+") as f_eval:
f_eval.write("{},{},{},{}\n".format(eval_it,
np.mean(test_rewards),
np.mean(test_ep_length),
np.mean(test_value)))
def extract_params_from_model(common_net):
""" Obtain the parameters of a model in an OrderedDict
Parameters
----------
common_net: instance of :class: `~blocks.model.Model`
the model from where the parameters are extracted
"""
params = OrderedDict()
for kk in common_net.get_parameter_dict():
params[kk] = common_net.get_parameter_dict()[kk].get_value()
return params
class A3C_Agent(threading.Thread):
""" This class implements the one thread agent of A3C
Parameters
----------
rng : numpy.random
num : int
id of the thread
env : instance of :class: `gym.environment`
gym environment of this thread
batch_size: int
maximum size of a training batch
    policy_network: instance of :class:`theano.function`
the policy function of the worker
value_network: instance of :class:`theano.function`
the value function of the worker
cost_model: instance of :class:`.Model`
the shared model with the cost application
algorithm: instance of :class: `theano.function`
gradient of the cost function of the model
resized_width: int
the width of the images that will be used by the agent
resized_height: int
the height of the images that will be used by the agent
agent_history_length: int
number of past frames that will be used by the agent
checkpoint_interval: int
number of steps between checkpoint saves
shared_model : instance of :class:`.Common_Model_Wrapper`
the shared model
num_reps_eval: int
        number of repetitions of the evaluation FIXME: not used
    gamma_rate: float
        discount rate when bootstrapping the accumulated reward
sample_argmax: bool
True if the action of max prob should be chosen or False to choose
a multinomial sampling strategy
"""
def __init__(self, rng, num, env, batch_size, policy_network,
value_network, cost_function,
cost_model, algorithm, resized_width, resized_height,
agent_history_length, checkpoint_interval, shared_model,
num_reps_eval, gamma_rate=0.99,
sample_argmax=True):
super(A3C_Agent, self).__init__()
self.rng = rng
self.num = num
self.env = env
self.batch_size = batch_size
self.policy_network = policy_network
self.value_network = value_network
self.cost_function = cost_function
self.cost_model = cost_model
self.optim_algorithm = algorithm
self.optim_algorithm.initialize()
self.resized_width = resized_width
self.resized_height = resized_height
self.agent_history_length = agent_history_length
self.checkpoint_interval = checkpoint_interval
self.shared_model = shared_model
self.num_reps_eval = num_reps_eval
self.gamma_rate = gamma_rate
if sample_argmax:
self._sample_function = sample_argmax_action
else:
self._sample_function = sample_policy_action
def reset_internal_state(self):
""" Resets internal state: state and LSTM cell """
pass
def obtain_policy(self, s_t):
""" Obtain the probabilities of each action
Parameters
----------
s_t :list of states
"""
probs = self.policy_network(s_t)[0]
return probs
def obtain_value(self, s_t):
return self.value_network(s_t)
def prepare_input_gradient(self, s_batch, a_batch, R_batch):
""" Preparees the input to the gradient calculation
s_batch: an instance of :class: numpy.matrix
it contains (batch, agent_history, imag_width, imag_height)
a_batch: list of :class: numpy.matrix
it contains a one-hot encoded vector with the action
for each sample in the batch
R_batch: list of :class: numpy.matrix
contains the bootstrapped reward for each step
(sample of the batch)
"""
batch = OrderedDict()
batch['input_image'] = s_batch
batch['input_actions'] = np.array(a_batch, dtype="int32")
batch['input_reward'] = np.array(R_batch, dtype="float32")
return batch
def run(self):
""" Runs the Worker """
logging.info("Thread {} running!".format(self.num))
self.pick_one_thread_data()
def pick_one_thread_data(self):
""" Executes the iterations till training is over """
# Wrap env with AtariEnvironment helper class
env = AtariEnvironment(
gym_env=self.env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length)
# Add different starting time for each agent
time.sleep(5*self.num)
# Set up per-episode counters
ep_reward = 0
ep_t = 0
probs_summary_t = 0
# Picking initial state
s_t = env.get_initial_state()
terminal = False
# Reset internals
self.reset_internal_state()
while (self.shared_model.common_model.curr_steps <
self.shared_model.common_model.max_steps):
s_batch = []
past_rewards = []
a_batch = []
t = 0
t_start = t
last_probs = []
# Update the model with the shared_model params (synchro)
self.shared_model.synchronize_model(self.cost_model)
# Store the initial params of the model
init_params = extract_params_from_model(self.cost_model)
# Execute one iteration
while not (terminal or ((t - t_start) == self.batch_size)):
probs = self.obtain_policy([s_t])[0]
action_index = self._sample_function(
len(env.gym_actions), probs, self.rng)
a_t = np.zeros([len(env.gym_actions)])
a_t[action_index] = 1
s_batch.append(s_t)
a_batch.append(a_t)
# Execute the action and obtain the reward
s_t1, r_t, terminal, info = env.step(action_index)
ep_reward += r_t
r_t = np.clip(r_t, -1, 1)
past_rewards.append(r_t)
t += 1
self.shared_model.common_model.curr_steps += 1
ep_t += 1
probs_summary_t += 1
s_t = s_t1
# Debug
last_probs = probs
last_value = self.obtain_value([s_t])[0]
if terminal:
R_t = 0
else:
# Picking last state
R_t = self.obtain_value([s_t])[0][0]
# Obtaining the reward at each epoch
R_batch = np.zeros(t)
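            # Walk the rewards backwards: each step's return is its immediate
            # (clipped) reward plus the discounted return of the following step,
            # i.e. the n-step bootstrapped return seeded with R_t above.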
for i in reversed(range(t_start, t)):
R_t = past_rewards[i] + self.gamma_rate * R_t
R_batch[i] = R_t
# Picking last value for stats
last_value = self.obtain_value([s_t])[0]
# FIXME: substitute print by a functional logging
# logging.info("Last Value {}".format(last_value))
batch = self.prepare_input_gradient(s_batch, a_batch, R_batch)
# Minimize gradient
            # Show stats every 100 iterations
if (self.shared_model.common_model.curr_it % 100 == 0):
# Show progress
logging.info("Reward in batch {}".format(
batch['input_reward']))
logging.info("Current IT {} Steps {}".format(
self.shared_model.common_model.curr_it,
self.shared_model.common_model.curr_steps))
logging.info("PROBS {}".format(
last_probs))
logging.info("VALUES {}".format(self.obtain_value([s_t])[0]))
# Perform basic gradient descent
self.optim_algorithm.process_batch(batch)
# update common parameters
end_params = extract_params_from_model(self.cost_model)
self.shared_model.update_cum_gradients(
init_params,
end_params,
batch_size=t-t_start,
stats_flag=((self.num == 0) and
((self.shared_model.common_model.curr_it %
1000 == 0)) or (
self.shared_model.common_model.curr_it == 1)))
self.shared_model.common_model.curr_it += 1
if ((self.shared_model.common_model.curr_it %
self.checkpoint_interval) == 0):
                # FIXME: Perform evaluation: we are going to skip this as
# gym env fails to close some env after a test
# self.shared_model.perform_evaluation( self.num_reps_eval,
# self.shared_model.common_model.game,
# self.shared_model.common_model.curr_steps)
# save progress
self.shared_model.save_model("{}_{}.tar".format(
self.shared_model.common_model.game,
self.shared_model.common_model.curr_steps))
# Check if terminal
if terminal:
logging.info(("Episode Reward\t{}\tEpisode " +
"Length\t{}\tValue " +
"terminal state\t{}").format(
ep_reward, ep_t, last_value[0][0]))
# TODO: collect stats
s_t = env.get_initial_state()
terminal = False
ep_reward = 0
ep_t = 0
# Reset internals
self.reset_internal_state()
class A3C_AgentLSTM(A3C_Agent):
"""
Parameters
----------
rng : numpy.random
num : int
id of the thread
env : instance of :class: `gym.environment`
gym environment of this thread
batch_size: int
maximum size of a training batch
    policy_network: instance of :class:`theano.function`
the policy function of the worker
value_network: instance of :class:`theano.function`
the value function of the worker
cost_model: instance of :class:`.Model`
the shared model with the cost application
algorithm: instance of :class: `theano.function`
gradient of the cost function of the model
resized_width: int
the width of the images that will be used by the agent
resized_height: int
the height of the images that will be used by the agent
agent_history_length: int
number of past frames that will be used by the agent
checkpoint_interval: int
number of steps between checkpoint saves
shared_model : instance of :class:`.Common_Model_Wrapper`
the shared model
num_reps_eval: int
        number of repetitions of the evaluation FIXME: not used
    gamma_rate: float
        discount rate when bootstrapping the accumulated reward
sample_argmax: bool
True if the action of max prob should be chosen or False to choose
a multinomial sampling strategy
lstm_output_units : int
number of lstm units in the hidden layer
"""
def __init__(self, rng, num, env, batch_size, policy_network,
value_network, cost_function,
cost_model, algorithm, resized_width, resized_height,
agent_history_length, checkpoint_interval, shared_model,
num_reps_eval, gamma_rate=0.99,
sample_argmax=True, lstm_output_units=256):
super(A3C_AgentLSTM, self).__init__(rng, num, env, batch_size,
policy_network, value_network,
cost_function, cost_model,
algorithm,
resized_width, resized_height,
agent_history_length,
checkpoint_interval,
shared_model, num_reps_eval,
gamma_rate, sample_argmax)
self.lstm_output_units = lstm_output_units
def reset_internal_state(self):
""" Resets internal state: state and LSTM cell """
self.state = []
self.cells = []
self.state.append(np.zeros((1, self.lstm_output_units),
dtype="float32"))
self.cells.append(np.zeros((1, self.lstm_output_units),
dtype="float32"))
def obtain_policy(self, s_t):
""" Obtain the probabilities of each action
Parameters
----------
s_t: list of states
"""
probs, state, cells = self.policy_network(self.cells[-1], s_t,
self.state[-1])
self.state.append(state)
self.cells.append(cells)
return probs
def obtain_value(self, s_t):
""" Obtains the value of the critic per state
Parameters
----------
s_t: list of states
"""
value = self.value_network(self.cells[-1], s_t, self.state[-1])
return value
def prepare_input_gradient(self, s_batch, a_batch, R_batch):
""" TODO """
batch = super(A3C_AgentLSTM, self).prepare_input_gradient(s_batch,
a_batch,
R_batch)
batch['states'] = self.state[0]
batch['cells'] = self.cells[0]
# FIXME: DEBUG
# error = self.cost_function(batch['cells'],
# batch['input_image'],
# batch['states'],
# batch['input_reward'],
# batch['input_actions'])
# Clear the internal state
# self.reset_internal_state()
last_state = self.state[-1]
last_cell = self.cells[-1]
# Picking last state
self.state = []
self.cells = []
self.state.append(last_state)
self.cells.append(last_cell)
return batch
class MultiA3CTrainStream(object):
""" Run an A3C Agent to collect training data and update an A3C model
Parameters
----------
rng : numpy.random
epochs : int
number of epochs FIXME: not used
max_steps: int
maximum number of steps the agent will play during training
batch_size: int
maximum size of a training batch
game : str
gym name of the game
num_threads : int
number of workers (training threads)
resized_width: int
the width of the images that will be used by the agent
resized_height: int
the height of the images that will be used by the agent
agent_history_length: int
number of past frames that will be used by the agent
checkpoint_interval : int
batch intervals at which a checkpoint is made
training_flag: bool
True if training should be performed
render_flag: bool
True to render the screen whilst evaluating the model
results_file: str
prefix path for storing the results
num_reps_eval: int
        number of repetitions of the evaluation FIXME: not used
    learning_rate: float
        learning rate during training
    gamma_rate: float
        discount rate when bootstrapping the accumulated reward
gradient_clipping: float
a float number to clip gradients to this value
(Default is None)
model_file: str
path of a previously stored model to load
"""
def __init__(self, rng, epochs, max_steps, batch_size, game, num_threads,
resized_width=84, resized_height=84, agent_history_length=4,
checkpoint_interval=5000, training_flag=True,
render_flag=False, results_file=False,
sample_argmax=True, num_steps_eval=1000,
num_reps_eval=10, learning_rate=0.00025,
gamma_rate=0.99,
gradient_clipping=None, model_file=None,
a3c_lstm=False,
lstm_output_units=256):
""" Initialize stuff
"""
self.rng = rng
self.epochs = epochs
self.max_steps = max_steps
self.batch_size = batch_size
self.game = game
self.num_threads = num_threads
self.resized_width = resized_width
self.resized_height = resized_height
self.agent_history_length = agent_history_length
self.checkpoint_interval = checkpoint_interval
self.training_flag = training_flag
self.num_steps_eval = num_steps_eval
self.num_reps_eval = num_reps_eval
self.render_flag = render_flag
self.results_file = results_file
self.sample_argmax = sample_argmax
self.learning_rate = learning_rate
self.gradient_clipping = gradient_clipping
self.model_file = model_file
self.gamma_rate = gamma_rate
self.a3c_lstm = a3c_lstm
self.lstm_output_units = lstm_output_units
# Build shared envs
# TODO: check
self.env = gym.make(self.game)
self.validation_env = AtariEnvironment(
gym_env=self.env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length)
def training(self):
""" Perform the training steps """
# FIXME: Refactor this code
# Create the envs of the threaded workers
envs = [gym.make(self.game) for i in range(self.num_threads)]
# Build the networks (one for each environment)
if (self.a3c_lstm):
a3c_networks = [A3C.build_a3c_network_lstm(
image_size=(self.resized_width,
self.resized_height),
num_channels=self.agent_history_length,
num_actions=len(self.validation_env.gym_actions),
lr=self.learning_rate,
clip_c=self.gradient_clipping,
lstm_output_units=self.lstm_output_units) for
thread_id in range(self.num_threads)]
else:
a3c_networks = [A3C.build_a3c_network(
image_size=(self.resized_width,
self.resized_height),
num_channels=self.agent_history_length,
num_actions=len(self.validation_env.gym_actions),
lr=self.learning_rate,
clip_c=self.gradient_clipping) for
thread_id in range(self.num_threads)]
logging.info("Building the shared networks")
if (self.a3c_lstm):
a3c_global = A3C.build_a3c_network_lstm(
image_size=(self.resized_width, self.resized_height),
num_channels=self.agent_history_length,
num_actions=len(self.validation_env.gym_actions),
lr=self.learning_rate,
clip_c=self.gradient_clipping,
async_update=True,
lstm_output_units=self.lstm_output_units)
logging.info("Building the shared worker")
# Building the extra environment for evaluation
a3c_global_costmodel = Common_Model_Wrapper(
rng=self.rng,
game=self.game,
model=a3c_global[0],
algorithm=a3c_global[3],
policy_network=a3c_global[1],
value_network=a3c_global[2],
monitor_env=self.env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length,
max_steps=self.max_steps,
num_steps_eval=self.num_steps_eval,
sample_argmax=self.sample_argmax,
results_file=self.results_file,
render_flag=self.render_flag,
lstm_output_units=self.lstm_output_units)
# Start num concurrent threads
thread_list = [A3C_AgentLSTM(
rng=self.rng,
num=thread_id,
env=envs[thread_id],
batch_size=self.batch_size,
policy_network=a3c_networks[thread_id][1],
value_network=a3c_networks[thread_id][2],
cost_model=a3c_networks[thread_id][0],
algorithm=a3c_networks[thread_id][3],
cost_function=a3c_networks[thread_id][4],
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length,
checkpoint_interval=self.checkpoint_interval,
shared_model=a3c_global_costmodel,
num_reps_eval=self.num_reps_eval,
gamma_rate=self.gamma_rate,
sample_argmax=self.sample_argmax)
for thread_id in range(self.num_threads)]
else:
a3c_global = A3C.build_a3c_network(
image_size=(self.resized_width, self.resized_height),
num_channels=self.agent_history_length,
num_actions=len(self.validation_env.gym_actions),
lr=self.learning_rate,
clip_c=self.gradient_clipping,
async_update=True)
logging.info("Building the shared worker")
# Building the extra environment for evaluation
a3c_global_costmodel = Common_Model_Wrapper(
rng=self.rng,
game=self.game,
model=a3c_global[0],
algorithm=a3c_global[3],
policy_network=a3c_global[1],
value_network=a3c_global[2],
monitor_env=self.env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length,
max_steps=self.max_steps,
num_steps_eval=self.num_steps_eval,
sample_argmax=self.sample_argmax,
results_file=self.results_file,
render_flag=self.render_flag)
# Start num concurrent threads
thread_list = [A3C_Agent(
rng=self.rng,
num=thread_id,
env=envs[thread_id],
batch_size=self.batch_size,
policy_network=a3c_networks[thread_id][1],
value_network=a3c_networks[thread_id][2],
cost_model=a3c_networks[thread_id][0],
algorithm=a3c_networks[thread_id][3],
cost_function=a3c_networks[thread_id][4],
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length,
checkpoint_interval=self.checkpoint_interval,
shared_model=a3c_global_costmodel,
num_reps_eval=self.num_reps_eval,
gamma_rate=self.gamma_rate,
sample_argmax=self.sample_argmax)
for thread_id in range(self.num_threads)]
for t in thread_list:
t.start()
# TODO: summary information here
for t in thread_list:
t.join()
def do_test(self):
""" Execute a gym evaluation """
logging.info("Building the shared networks")
if (self.a3c_lstm):
a3c_global = A3C.build_a3c_network_lstm(
image_size=(self.resized_width, self.resized_height),
num_channels=self.agent_history_length,
num_actions=len(self.validation_env.gym_actions),
lr=self.learning_rate,
clip_c=self.gradient_clipping,
async_update=True,
lstm_output_units=self.lstm_output_units)
else:
a3c_global = A3C.build_a3c_network(
image_size=(self.resized_width, self.resized_height),
num_channels=self.agent_history_length,
num_actions=len(self.validation_env.gym_actions),
lr=self.learning_rate,
clip_c=self.gradient_clipping,
async_update=True)
logging.info("Building the shared worker")
# Building the extra environment for evaluation
if (self.a3c_lstm):
a3c_global_costmodel = Common_Model_Wrapper(
rng=self.rng,
game=self.game,
model=a3c_global[0],
algorithm=a3c_global[3],
policy_network=a3c_global[1],
value_network=a3c_global[2],
monitor_env=self.env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length,
max_steps=self.max_steps,
num_steps_eval=self.num_steps_eval,
sample_argmax=self.sample_argmax,
results_file=self.results_file,
render_flag=self.render_flag,
lstm_output_units=self.lstm_output_units)
else:
a3c_global_costmodel = Common_Model_Wrapper(
rng=self.rng,
game=self.game,
model=a3c_global[0],
algorithm=a3c_global[3],
policy_network=a3c_global[1],
value_network=a3c_global[2],
monitor_env=self.env,
resized_width=self.resized_width,
resized_height=self.resized_height,
agent_history_length=self.agent_history_length,
max_steps=self.max_steps,
num_steps_eval=self.num_steps_eval,
sample_argmax=self.sample_argmax,
results_file=self.results_file,
render_flag=self.render_flag)
if self.model_file is not None:
print "Loading previous model"
with open(self.model_file, 'rb') as src:
parameters = serialization.load_parameters(src)
a3c_global_costmodel.common_model.model.set_parameter_values(
parameters)
# Use the current time to build the experiment name
exp_name = "{}_{}".format(self.game,
time.strftime("%Y%m%d%H%M%S"))
# Perform an evaluation
a3c_global_costmodel.perform_evaluation(
self.num_reps_eval, exp_name,
0, do_gym_eval=True)
def execute(self):
""" Perform training/evaluation of the model """
if self.training_flag:
self.training()
else:
self.do_test()
|
|
class logpyl:
"""The logpyl class implements basic logging functionality for Python programs.
A logpyl log consists of three files. The log file contains the events logged
to a logpyl object. The metadata file contains information about the log, such
as creation date, modification date, and name. The checksum file contains an MD5
hash created from the log and metadata files, and is used to check the integrity
of log files."""
def __init__(self,path='logs',name='NewLog',debug='off'):
"Initialize or open logs as log objects are instantiated."
import sys
import os.path
self.events = [] # list of events written to this log
self.debug = debug # flag to activate/deactive debugging messages
# First try the metadata file
self.metadatafile = name + '.lmd'
self.mdf = path + '/' + self.metadatafile
mfn = os.path.isfile(self.mdf)
if ( mfn ):
if ( debug == 'on' ):
print 'DEBUG: Metadata file',self.metadatafile,'exists.'
# Since the file exists, get the metadata
mfn = open(self.mdf,'r')
self.name = mfn.readline().strip()
self.path = mfn.readline().strip()
self.logfile = mfn.readline().strip()
self.metadatafile = mfn.readline().strip()
self.checksumfile = mfn.readline().strip()
self.created = mfn.readline().strip()
self.modified = mfn.readline().strip()
mfn.close()
self.ldf = path + '/' + self.logfile
self.cdf = path + '/' + self.checksumfile
else:
if ( debug == 'on' ):
                print 'DEBUG: Metadata file',self.metadatafile,'does not exist.'
self.name = name
self.path = path
self.metadatafile = name + '.lmd'
self.logfile = name + '.log'
self.checksumfile = name + '.md5'
import time
self.created = time.asctime(time.localtime(time.time()))
self.mdf = path + '/' + self.metadatafile
self.ldf = path + '/' + self.logfile
self.cdf = path + '/' + self.checksumfile
# Then try the log file
lfn = os.path.isfile(self.ldf)
if ( lfn ):
if ( debug == 'on' ):
print 'DEBUG: Log file',self.logfile,'exists.'
lfn = open(self.ldf,'r')
for line in lfn.readlines():
self.events.append(line.strip())
lfn.close()
else:
if ( debug == 'on' ):
print 'DEBUG: Log file',self.logfile,'does not exist.'
# Finally, try the checksum file
cfn = os.path.isfile(self.cdf)
if ( cfn ):
if ( debug == 'on' ):
print 'DEBUG: Checksum file',self.checksumfile,'exists.'
cfn = open(self.cdf, 'r')
self.md5 = cfn.read().strip()
if ( debug == 'on' ):
print 'DEBUG: MD5 checksum',self.md5,'read from',self.checksumfile
cfn.close()
else:
if ( debug == 'on' ):
print 'DEBUG: Checksum file',self.checksumfile,'does not exist.'
pass
# Once we have read the metadata, verify the integrity of the logfiles.
self.verify()
def add(self, eventclass="note", message="Your message here"):
"Compose a log entry from the elements passed to add() and append it to the list of events."
import time
event = self.datetime() + ' ' + eventclass + ' ' + message
if ( self.debug == "on" ):
print 'DEBUG: Adding', event, 'to log', self.name
self.modified = time.asctime(time.localtime(time.time()))
self.events.append(event)
return
def close(self):
"Close the log by writing all log and metadata to the proper files. Also update the checksum file."
import sys
import os
"Write the current version of the log to a file and free the variables used by the log."
if ( self.debug == 'on' ):
print "DEBUG: Closing log", self.name
# If self.path does not exist, create the directory for the logfiles.
if ( not os.path.exists(self.path ) ):
if ( self.debug == 'on' ):
print 'DEBUG: Directory ',self.path,' does not exist. I am creating it now.'
try:
os.makedirs(self.path)
if ( self.debug == 'on' ):
print 'DEBUG: Created log file directory',self.path
            except OSError:
print 'ERROR: Could not create log file directory',self.path
# Make sure that the metadata file is opened (created) and written.
import time
mfn = open(self.mdf, 'w+')
mfn.write(self.name+'\n')
mfn.write(self.path+'\n')
mfn.write(self.logfile+'\n')
mfn.write(self.metadatafile+'\n')
mfn.write(self.checksumfile+'\n')
mfn.write(self.created+'\n')
if ( not hasattr(self,'modified') ):
mfn.write(self.created+'\n')
else:
mfn.write(self.modified+'\n')
mfn.close()
# Make sure that the log entries are written.
lfn = open(self.ldf, 'w+')
for event in self.events:
lfn.write(event+'\n')
lfn.close()
# Create the MD5 checksum from the log file and metadata file
import md5
checksum = md5.new()
mfn = open(self.mdf, 'r')
for line in mfn.readlines():
checksum.update(line)
mfn.close()
lfn = open(self.ldf, 'r')
for line in lfn.readlines():
checksum.update(line)
lfn.close()
cs = checksum.hexdigest()
if ( self.debug == 'on' ):
print 'DEBUG: The MD5 digest of the metadata and log files is',cs
# Make sure that the checksum file is opened (created) and written to.
cfn = open(self.cdf,'w+')
cfn.write(cs+'\n')
cfn.close()
def datetime(self):
"Generate the date/time stamp used in our log entries"
import time
datestamp = time.asctime(time.localtime(time.time()))
return datestamp
def info(self):
print 'Info about log', self.name, ':'
print '\tName:', self.name
print '\tPath:', self.path
print '\tLog file:', self.logfile
print '\tMetadata file:', self.metadatafile
print '\tChecksum file:', self.checksumfile
if ( hasattr(self,'md5') ):
print '\t\tMD5 Checksum:',self.md5
print '\tNo. of entries:', len(self.events)
if ( hasattr(self,'created') ):
print '\tCreated:',self.created
if ( hasattr(self,'modified') ) :
print '\tModified:',self.modified
def printlog(self):
print '\nPrinting log', self.name
for event in self.events:
print event
print '\n'
def verify(self):
"Compute the MD5 checksum for this log to see if the logfiles have been corrupted."
# If there is no self.md5, no checksum exists for this log yet...
if ( not hasattr(self,'md5') ):
print 'WARNING: No MD5 checksum was found for log',self.name
print 'WARNING: Log',self.name,'may be newly created, or it may be corrupt!'
return
# Otherwise, create the MD5 checksum from the log file and metadata file for verification
import md5
checksum = md5.new()
mfn = open(self.mdf, 'r')
for line in mfn.readlines():
checksum.update(line)
mfn.close()
lfn = open(self.ldf, 'r')
for line in lfn.readlines():
checksum.update(line)
lfn.close()
cs = checksum.hexdigest()
if ( self.debug == 'on' ):
print 'DEBUG: The MD5 digest of the metadata and log files is',cs
if ( self.md5 == cs ):
if ( self.debug == 'on' ):
print 'DEBUG: The calculated MD5 checksum',cs,'matches the stored MD5 checksum',self.md5
else:
if ( self.debug == 'on' ):
print 'DEBUG: The calculated MD5 checksum',cs,'does not match the stored MD5 checksum',self.md5
print 'ERROR: The MD5 checksum for log',self.name,'is inconsistent!'
print 'ERROR: Log',self.name,'may be corrupt!'
if __name__ == '__main__':
# create a new log or open an existing log with debugging turned on
# (disable debugging messages by passing 'off' as the third parm to logpyl())
mylog = logpyl('logs','testlog','on')
# add a couple of events to the log
mylog.add("spam","Spam is US$1.95 per can.")
mylog.add("eggs","Eggs are US$0.89 per dozen.")
# print some summary information about the log
mylog.info()
# print the log entries
mylog.printlog()
# close the log
mylog.close()
|
|
"""
Support for LimitlessLED bulbs.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.limitlessled/
"""
# pylint: disable=abstract-method
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_RGB_COLOR,
ATTR_TRANSITION, EFFECT_COLORLOOP, EFFECT_WHITE, FLASH_LONG, Light)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['limitlessled==1.0.0']
RGB_BOUNDARY = 40
DEFAULT_TRANSITION = 0
DEFAULT_PORT = 8899
DEFAULT_VERSION = 5
DEFAULT_LED_TYPE = 'rgbw'
WHITE = [255, 255, 255]
def rewrite_legacy(config):
"""Rewrite legacy configuration to new format."""
bridges = config.get('bridges', [config])
new_bridges = []
for bridge_conf in bridges:
groups = []
if 'groups' in bridge_conf:
groups = bridge_conf['groups']
else:
_LOGGER.warning("Legacy configuration format detected")
for i in range(1, 5):
name_key = 'group_%d_name' % i
if name_key in bridge_conf:
groups.append({
'number': i,
'type': bridge_conf.get('group_%d_type' % i,
DEFAULT_LED_TYPE),
'name': bridge_conf.get(name_key)
})
new_bridges.append({
'host': bridge_conf.get('host'),
'groups': groups
})
return {'bridges': new_bridges}
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the LimitlessLED lights."""
from limitlessled.bridge import Bridge
# Two legacy configuration formats are supported to
# maintain backwards compatibility.
config = rewrite_legacy(config)
# Use the expanded configuration format.
lights = []
for bridge_conf in config.get('bridges'):
bridge = Bridge(bridge_conf.get('host'),
port=bridge_conf.get('port', DEFAULT_PORT),
version=bridge_conf.get('version', DEFAULT_VERSION))
for group_conf in bridge_conf.get('groups'):
group = bridge.add_group(group_conf.get('number'),
group_conf.get('name'),
group_conf.get('type', DEFAULT_LED_TYPE))
lights.append(LimitlessLEDGroup.factory(group))
add_devices_callback(lights)
def state(new_state):
"""State decorator.
Specify True (turn on) or False (turn off).
"""
def decorator(function):
"""Decorator function."""
# pylint: disable=no-member,protected-access
def wrapper(self, **kwargs):
"""Wrap a group state change."""
from limitlessled.pipeline import Pipeline
pipeline = Pipeline()
transition_time = DEFAULT_TRANSITION
# Stop any repeating pipeline.
if self.repeating:
self.repeating = False
self.group.stop()
# Not on and should be? Turn on.
if not self.is_on and new_state is True:
pipeline.on()
# Set transition time.
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
# Do group type-specific work.
function(self, transition_time, pipeline, **kwargs)
# Update state.
self._is_on = new_state
self.group.enqueue(pipeline)
self.update_ha_state()
return wrapper
return decorator
class LimitlessLEDGroup(Light):
"""Representation of a LimitessLED group."""
def __init__(self, group):
"""Initialize a group."""
self.group = group
self.repeating = False
self._is_on = False
self._brightness = None
@staticmethod
def factory(group):
"""Produce LimitlessLEDGroup objects."""
from limitlessled.group.rgbw import RgbwGroup
from limitlessled.group.white import WhiteGroup
if isinstance(group, WhiteGroup):
return LimitlessLEDWhiteGroup(group)
elif isinstance(group, RgbwGroup):
return LimitlessLEDRGBWGroup(group)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the group."""
return self.group.name
@property
def is_on(self):
"""Return true if device is on."""
return self._is_on
@property
def brightness(self):
"""Return the brightness property."""
return self._brightness
@state(False)
def turn_off(self, transition_time, pipeline, **kwargs):
"""Turn off a group."""
if self.is_on:
pipeline.transition(transition_time, brightness=0.0).off()
class LimitlessLEDWhiteGroup(LimitlessLEDGroup):
"""Representation of a LimitlessLED White group."""
def __init__(self, group):
"""Initialize White group."""
super().__init__(group)
# Initialize group with known values.
self.group.on = True
self.group.temperature = 1.0
self.group.brightness = 0.0
self._brightness = _to_hass_brightness(1.0)
self._temperature = _to_hass_temperature(self.group.temperature)
self.group.on = False
@property
def color_temp(self):
"""Return the temperature property."""
return self._temperature
@state(True)
def turn_on(self, transition_time, pipeline, **kwargs):
"""Turn on (or adjust property of) a group."""
# Check arguments.
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_COLOR_TEMP in kwargs:
self._temperature = kwargs[ATTR_COLOR_TEMP]
# Set up transition.
pipeline.transition(transition_time,
brightness=_from_hass_brightness(
self._brightness),
temperature=_from_hass_temperature(
self._temperature))
class LimitlessLEDRGBWGroup(LimitlessLEDGroup):
"""Representation of a LimitlessLED RGBW group."""
def __init__(self, group):
"""Initialize RGBW group."""
super().__init__(group)
# Initialize group with known values.
self.group.on = True
self.group.white()
self._color = WHITE
self.group.brightness = 0.0
self._brightness = _to_hass_brightness(1.0)
self.group.on = False
@property
def rgb_color(self):
"""Return the color property."""
return self._color
@state(True)
def turn_on(self, transition_time, pipeline, **kwargs):
"""Turn on (or adjust property of) a group."""
from limitlessled.presets import COLORLOOP
# Check arguments.
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_RGB_COLOR in kwargs:
self._color = kwargs[ATTR_RGB_COLOR]
# White is a special case.
if min(self._color) > 256 - RGB_BOUNDARY:
pipeline.white()
self._color = WHITE
# Set up transition.
pipeline.transition(transition_time,
brightness=_from_hass_brightness(
self._brightness),
color=_from_hass_color(self._color))
# Flash.
if ATTR_FLASH in kwargs:
duration = 0
if kwargs[ATTR_FLASH] == FLASH_LONG:
duration = 1
pipeline.flash(duration=duration)
# Add effects.
if ATTR_EFFECT in kwargs:
if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
self.repeating = True
pipeline.append(COLORLOOP)
if kwargs[ATTR_EFFECT] == EFFECT_WHITE:
pipeline.white()
self._color = WHITE
def _from_hass_temperature(temperature):
"""Convert Home Assistant color temperature units to percentage."""
return (temperature - 154) / 346
def _to_hass_temperature(temperature):
"""Convert percentage to Home Assistant color temperature units."""
return int(temperature * 346) + 154
def _from_hass_brightness(brightness):
"""Convert Home Assistant brightness units to percentage."""
return brightness / 255
def _to_hass_brightness(brightness):
"""Convert percentage to Home Assistant brightness units."""
return int(brightness * 255)
def _from_hass_color(color):
"""Convert Home Assistant RGB list to Color tuple."""
from limitlessled import Color
return Color(*tuple(color))
def _to_hass_color(color):
"""Convert from Color tuple to Home Assistant RGB list."""
    return [int(c) for c in color]
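# Hypothetical sanity-check sketch (not part of the original platform code):
# the helpers above map Home Assistant brightness (0-255) and color
# temperature (154-500 mireds) onto the 0.0-1.0 scale used by limitlessled.
def _example_conversions():
    """Illustrate round-tripping of the brightness and temperature helpers."""
    assert _to_hass_brightness(_from_hass_brightness(255)) == 255
    assert _from_hass_temperature(154) == 0.0
    assert _to_hass_temperature(1.0) == 500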
|
|
"""
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
funcs.py
~~~~~~~~
Functions to process time series data.
"""
import logging
import datetime
import numpy as np
import pandas as pd
import xarray as xr
from calliope import exceptions
from calliope.time import clustering
logger = logging.getLogger(__name__)
def get_daily_timesteps(data, check_uniformity=False):
    """Return the timestep resolutions making up one day of the model data."""
    daily_timesteps = [
data.timestep_resolution.loc[i].values
for i in np.unique(data.timesteps.to_index().strftime("%Y-%m-%d"))
]
if check_uniformity:
if not np.all(daily_timesteps == daily_timesteps[0]):
raise exceptions.ModelError(
"For clustering, timestep resolution must be uniform."
)
return daily_timesteps[0]
def normalized_copy(data):
"""
Normalize timeseries data, using the maximum across all regions and timesteps.
Parameters
----------
data : xarray Dataset
Dataset with all non-time dependent variables removed
Returns
-------
ds : xarray Dataset
Copy of `data`, with the absolute taken and normalized to 0-1
"""
ds = data.copy(deep=True) # Work off a copy
for var in ds.data_vars:
ds[var] = abs(ds[var] / abs(ds[var]).groupby("techs").max(..., skipna=True))
return ds
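# Hypothetical toy illustration (not part of the original module): with two
# techs, each variable is scaled by that tech's maximum absolute value, so
# every normalized value lies between 0 and 1.
def _example_normalized_copy():
    """Build a tiny tech/timestep dataset and normalize it."""
    toy = xr.Dataset(
        {"resource": (("techs", "timesteps"), np.array([[2.0, -4.0], [1.0, 0.5]]))},
        coords={"techs": ["pv", "wind"], "timesteps": [0, 1]},
    )
    return normalized_copy(toy)  # pv -> [0.5, 1.0], wind -> [1.0, 0.5]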
def _copy_non_t_vars(data0, data1):
"""Copies non-t-indexed variables from data0 into data1, then
returns data1"""
non_t_vars = [
varname
for varname, vardata in data0.data_vars.items()
if "timesteps" not in vardata.dims
]
# Manually copy over variables not in `timesteps`. If we don't do this,
# these vars get polluted with a superfluous `timesteps` dimension
for v in non_t_vars:
data1[v] = data0[v]
return data1
def _combine_datasets(data0, data1):
"""Concatenates data0 and data1 along the time dimension"""
data_new = xr.concat([data0, data1], dim="timesteps")
# Ensure time dimension is ordered
data_new = data_new.loc[{"timesteps": data_new.timesteps.to_index().sort_values()}]
return data_new
def _drop_timestep_vars(data, timesteps):
    """Return a copy of `data` restricted to `timesteps` containing only
    time-indexed variables, along with the original non-time coordinates."""
    timeseries_data = data.copy(deep=True)
# Save all coordinates, to ensure they can be added back in after clustering
data_coords = data.copy().coords
del data_coords["timesteps"]
if timesteps is not None:
timeseries_data = timeseries_data.loc[{"timesteps": timesteps}]
timeseries_data = timeseries_data.drop_vars(
[
varname
for varname, vardata in data.data_vars.items()
if "timesteps" not in vardata.dims
]
)
return timeseries_data, data_coords
def apply_clustering(
data,
timesteps,
clustering_func,
how,
normalize=True,
scale_clusters="mean",
storage_inter_cluster=True,
model_run=None,
**kwargs,
):
"""
Apply the given clustering function to the given data.
Parameters
----------
data : xarray.Dataset
timesteps : pandas.DatetimeIndex or list of timesteps or None
clustering_func : str
Name of clustering function. Can be `file=....csv:column_name`
if loading custom clustering. Custom clustering index = timeseries days.
If no column_name, the CSV file must have only one column of data.
how : str
How to map clusters to data. 'mean' or 'closest'.
normalize : bool, optional
If True (default), data is normalized before clustering is applied,
using :func:`~calliope.time.funcs.normalized_copy`.
scale_clusters : str or None, default = 'mean'
Scale the results of clustering such that the clusters match the metric
given by scale_clusters. For example, 'mean' scales along each loc_tech
and variable to match inputs and outputs. Other options for matching
include 'sum', 'max', and 'min'. If None, no scaling occurs.
**kwargs : optional
Arguments passed to clustering_func.
Returns
-------
data_new_scaled : xarray.Dataset
"""
assert how in ["mean", "closest"]
daily_timesteps = get_daily_timesteps(data, check_uniformity=True)
timesteps_per_day = len(daily_timesteps)
# get a copy of the dataset with only timeseries variables,
# and get all coordinates of the original dataset, to reinstate later
data_to_cluster, data_coords = _drop_timestep_vars(data, timesteps)
data_to_cluster = data_to_cluster.drop_vars(
["timestep_weights", "timestep_resolution"]
)
for dim in data_to_cluster.dims:
data_to_cluster[dim] = data[dim]
with pd.option_context("mode.use_inf_as_na", True):
if normalize:
data_normalized = normalized_copy(data_to_cluster)
else:
data_normalized = data_to_cluster
if "file=" in clustering_func:
file = clustering_func.split("=")[1]
if ":" in file:
file, column = file.rsplit(":", 1)
else:
column = None
df = model_run.timeseries_data[file]
if isinstance(df, pd.Series) and column is not None:
exceptions.warn(
"{} given as time clustering column, but only one column to "
"choose from in {}.".format(column, file)
)
clusters = df.resample("1D").mean()
elif isinstance(df, pd.DataFrame) and column is None:
raise exceptions.ModelError(
"No time clustering column given, but multiple columns found in "
"{0}. Choose one column and add it to {1} as {1}:name_of_column.".format(
file, clustering_func
)
)
elif isinstance(df, pd.DataFrame) and column not in df.columns:
raise KeyError(
"time clustering column {} not found in {}.".format(column, file)
)
elif isinstance(df, pd.DataFrame):
clusters = (
df.loc[:, column].dropna().groupby(pd.Grouper(freq="1D")).unique()
)
# Check there weren't instances of more than one cluster assigned to a day
# or days with no information assigned
if any([len(i) == 0 for i in clusters.values]):
raise exceptions.ModelError(
"Missing cluster days in `{}:{}`.".format(file, column)
)
elif any([len(i) > 1 for i in clusters.values]):
raise exceptions.ModelError(
"More than one cluster value assigned to a day in `{}:{}`. "
"Unique clusters per day: {}".format(file, column, clusters)
)
else:
clusters.loc[:] = [i[0] for i in clusters.values]
else:
result = clustering.get_clusters(
data_normalized,
clustering_func,
timesteps_per_day=timesteps_per_day,
**kwargs,
)
clusters = result[0] # Ignore other stuff returned
data_new = clustering.map_clusters_to_data(
data_to_cluster,
clusters,
how=how,
daily_timesteps=daily_timesteps,
storage_inter_cluster=storage_inter_cluster,
)
# It's now safe to add the original coordinates back in (preserving all the
# loc_tech sets that aren't used to index a variable in the DataArray)
data_new.update(data_coords)
data_new = _copy_non_t_vars(data, data_new)
if timesteps is not None:
data_new = _copy_non_t_vars(data, data_new)
data_new = _combine_datasets(data.drop_sel(timesteps=timesteps), data_new)
data_new = _copy_non_t_vars(data, data_new)
# Scale the new/combined data so that the mean for each (loc_tech, variable)
# combination matches that from the original data
data_new_scaled = data_new.copy(deep=True)
if scale_clusters:
data_vars_in_t = [
v
for v in data_new.data_vars
if "timesteps" in data_new[v].dims
and "timestep_" not in v
and v != "clusters"
]
for var in data_vars_in_t:
scale = getattr(data[var], scale_clusters)(dim="timesteps") / getattr(
data_new[var], scale_clusters
)(dim="timesteps")
data_new_scaled[var] = data_new[var] * scale.fillna(0)
lookup_clusters(data_new_scaled)
return data_new_scaled
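# Hypothetical usage sketch (not part of the original module); the CSV file
# name, its "cluster" column and the `model_run` object are assumptions.
# A day-to-cluster mapping read from "file=cluster_days.csv:cluster" is applied
# to every timestep, with cluster profiles taken as the mean over member days.
def _example_apply_file_clustering(data, model_run):
    """Cluster `data` using day-to-cluster assignments from a CSV column."""
    return apply_clustering(
        data,
        timesteps=None,
        clustering_func="file=cluster_days.csv:cluster",
        how="mean",
        model_run=model_run,
    )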
def resample(data, timesteps, resolution):
"""
    Resample timeseries data from the input resolution (e.g. 1H) to the
    given resolution (e.g. 2H).
Parameters
----------
data : xarray.Dataset
calliope model data, containing only timeseries data variables
timesteps : str or list; optional
If given, apply resampling to a subset of the timeseries data
resolution : str
time resolution of the output data, given in Pandas time frequency format.
E.g. 1H = 1 hour, 1W = 1 week, 1M = 1 month, 1T = 1 minute. Multiples allowed.
"""
def _resample(var, how):
return getattr(var.resample(timesteps=resolution, keep_attrs=True), how)(
"timesteps"
)
# get a copy of the dataset with only timeseries variables,
# and get all coordinates of the original dataset, to reinstate later
data_new, data_coords = _drop_timestep_vars(data, timesteps)
# First create a new resampled dataset of the correct size by
# using first-resample, which should be a quick way to achieve this
data_rs = _resample(data_new, how="first")
for var in data_rs.data_vars:
if var in ["timestep_resolution", "resource"]:
data_rs[var] = _resample(data_new[var], how="sum")
else:
try:
data_rs[var] = _resample(data_new[var], how="mean")
except TypeError:
# If the var has a datatype of strings, it can't be resampled
logger.error(
"Dropping {} because it has a {} data type when integer or "
"float is expected for timeseries resampling.".format(
var, data_rs[var].dtype
)
)
data_rs = data_rs.drop_vars(var)
# Get rid of the filled-in NaN timestamps
data_rs = data_rs.dropna(dim="timesteps", how="all")
data_rs.attrs["allow_operate_mode"] = 1 # Resampling still permits operational mode
# It's now safe to add the original coordinates back in (preserving all the
# loc_tech sets that aren't used to index a variable in the DataArray)
data_rs.update(data_coords)
data_rs = _copy_non_t_vars(data, data_rs) # add back in non timeseries data
if timesteps is not None:
# Combine leftover parts of passed in data with new data
data_rs = _combine_datasets(data.drop_sel(timesteps=timesteps), data_rs)
data_rs = _copy_non_t_vars(data, data_rs)
# Having timesteps with different lengths does not permit operational mode
data_rs.attrs["allow_operate_mode"] = 0
return data_rs
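# Hypothetical usage sketch (not part of the original module): resample every
# timestep of a model dataset down to 6-hourly resolution.
def _example_resample_to_six_hours(data):
    """Return `data` resampled to a 6H resolution across all timesteps."""
    return resample(data, timesteps=None, resolution="6H")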
def drop(data, timesteps):
"""
Drop timesteps from data, adjusting the timestep weight of remaining
timesteps accordingly. Returns updated dataset.
Parameters
----------
data : xarray.Dataset
Calliope model data.
timesteps : str or list or other iterable
Pandas-compatible timestep strings.
"""
# Turn timesteps into a pandas datetime index for subsetting, which also
# checks whether they are actually valid
try:
timesteps_pd = pd.to_datetime(timesteps)
    except Exception as e:
        raise exceptions.ModelError(
            "Invalid timesteps: {}. {}".format(timesteps, e)
        )
# 'Distribute weight' of the dropped timesteps onto the remaining ones
dropped_weight = data.timestep_weights.loc[{"timesteps": timesteps_pd}].sum()
data = data.drop_sel(timesteps=timesteps_pd)
data["timestep_weights"] = data["timestep_weights"] + (
dropped_weight / len(data["timestep_weights"])
)
return data
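# Hypothetical usage sketch (not part of the original module): remove one day
# of hourly timesteps; its weight is spread over the remaining timesteps. The
# date used here is an assumption.
def _example_drop_one_day(data):
    """Drop all 24 hourly timesteps of an assumed date from `data`."""
    return drop(data, pd.date_range("2005-01-01", periods=24, freq="1H"))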
def lookup_clusters(dataset):
"""
For any given timestep in a time clustered model, get:
1. the first and last timestep of the cluster,
2. the last timestep of the cluster corresponding to a date in the original timeseries
"""
data_dict_first = dict(dims=["timesteps"], data=[])
data_dict_last = dict(dims=["timesteps"], data=[])
for timestep in dataset.timesteps:
t = pd.to_datetime(timestep.item()).date().strftime("%Y-%m-%d")
timestep_first = dataset.timesteps.loc[t][0]
timestep_last = dataset.timesteps.loc[t][-1]
if timestep == timestep_first:
data_dict_first["data"].append(1)
data_dict_last["data"].append(timestep_last.values)
else:
data_dict_first["data"].append(0)
data_dict_last["data"].append(None)
dataset["lookup_cluster_first_timestep"] = xr.DataArray.from_dict(data_dict_first)
dataset["lookup_cluster_last_timestep"] = xr.DataArray.from_dict(data_dict_last)
if "datesteps" in dataset.dims:
last_timesteps = dict(dims=["datesteps"], data=[])
cluster_date = dataset.timestep_cluster.to_pandas().resample("1D").mean()
for datestep in dataset.datesteps.to_index():
cluster = dataset.lookup_datestep_cluster.loc[
datestep.strftime("%Y-%m-%d")
].item()
last_timesteps["data"].append(
datetime.datetime.combine(
cluster_date[cluster_date == cluster].index[0].date(),
dataset.timesteps.to_index().time[-1],
)
)
dataset["lookup_datestep_last_cluster_timestep"] = xr.DataArray.from_dict(
last_timesteps
)
return dataset
|
|
"""Utilities for all Certbot."""
import argparse
import collections
# distutils.version under virtualenv confuses pylint
# For more info, see: https://github.com/PyCQA/pylint/issues/73
import distutils.version # pylint: disable=import-error,no-name-in-module
import errno
import logging
import os
import platform
import re
import six
import socket
import stat
import subprocess
import sys
import configargparse
from certbot import errors
logger = logging.getLogger(__name__)
Key = collections.namedtuple("Key", "file pem")
# Note: form is the type of data, "pem" or "der"
CSR = collections.namedtuple("CSR", "file data form")
# ANSI SGR escape codes
# Formats text as bold or with increased intensity
ANSI_SGR_BOLD = '\033[1m'
# Colors text red
ANSI_SGR_RED = "\033[31m"
# Resets output format
ANSI_SGR_RESET = "\033[0m"
def run_script(params):
"""Run the script with the given params.
:param list params: List of parameters to pass to Popen
"""
try:
proc = subprocess.Popen(params,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (OSError, ValueError):
msg = "Unable to run the command: %s" % " ".join(params)
logger.error(msg)
raise errors.SubprocessError(msg)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
msg = "Error while running %s.\n%s\n%s" % (
" ".join(params), stdout, stderr)
# Enter recovery routine...
logger.error(msg)
raise errors.SubprocessError(msg)
return stdout, stderr
def exe_exists(exe):
"""Determine whether path/name refers to an executable.
:param str exe: Executable path or name
:returns: If exe is a valid executable
:rtype: bool
"""
def is_exe(path):
"""Determine if path is an exe."""
return os.path.isfile(path) and os.access(path, os.X_OK)
path, _ = os.path.split(exe)
if path:
return is_exe(exe)
else:
for path in os.environ["PATH"].split(os.pathsep):
if is_exe(os.path.join(path, exe)):
return True
return False
def make_or_verify_dir(directory, mode=0o755, uid=0, strict=False):
"""Make sure directory exists with proper permissions.
:param str directory: Path to a directory.
:param int mode: Directory mode.
:param int uid: Directory owner.
:param bool strict: require directory to be owned by current user
:raises .errors.Error: if a directory already exists,
but has wrong permissions or owner
:raises OSError: if invalid or inaccessible file names and
paths, or other arguments that have the correct type,
but are not accepted by the operating system.
"""
try:
os.makedirs(directory, mode)
except OSError as exception:
if exception.errno == errno.EEXIST:
if strict and not check_permissions(directory, mode, uid):
raise errors.Error(
"%s exists, but it should be owned by user %d with"
"permissions %s" % (directory, uid, oct(mode)))
else:
raise
def check_permissions(filepath, mode, uid=0):
"""Check file or directory permissions.
:param str filepath: Path to the tested file (or directory).
:param int mode: Expected file mode.
:param int uid: Expected file owner.
:returns: True if `mode` and `uid` match, False otherwise.
:rtype: bool
"""
file_stat = os.stat(filepath)
return stat.S_IMODE(file_stat.st_mode) == mode and file_stat.st_uid == uid
def safe_open(path, mode="w", chmod=None, buffering=None):
"""Safely open a file.
:param str path: Path to a file.
    :param str mode: Same as `mode` for `open`.
:param int chmod: Same as `mode` for `os.open`, uses Python defaults
if ``None``.
:param int buffering: Same as `bufsize` for `os.fdopen`, uses Python
defaults if ``None``.
"""
# pylint: disable=star-args
open_args = () if chmod is None else (chmod,)
fdopen_args = () if buffering is None else (buffering,)
return os.fdopen(
os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, *open_args),
mode, *fdopen_args)
def _unique_file(path, filename_pat, count, mode):
while True:
current_path = os.path.join(path, filename_pat(count))
try:
return safe_open(current_path, chmod=mode),\
os.path.abspath(current_path)
except OSError as err:
# "File exists," is okay, try a different name.
if err.errno != errno.EEXIST:
raise
count += 1
def unique_file(path, mode=0o777):
"""Safely finds a unique file.
:param str path: path/filename.ext
:param int mode: File mode
:returns: tuple of file object and file name
"""
path, tail = os.path.split(path)
return _unique_file(
path, filename_pat=(lambda count: "%04d_%s" % (count, tail)),
count=0, mode=mode)
def unique_lineage_name(path, filename, mode=0o777):
"""Safely finds a unique file using lineage convention.
:param str path: directory path
:param str filename: proposed filename
:param int mode: file mode
:returns: tuple of file object and file name (which may be modified
from the requested one by appending digits to ensure uniqueness)
:raises OSError: if writing files fails for an unanticipated reason,
such as a full disk or a lack of permission to write to
specified location.
"""
preferred_path = os.path.join(path, "%s.conf" % (filename))
try:
return safe_open(preferred_path, chmod=mode), preferred_path
except OSError as err:
if err.errno != errno.EEXIST:
raise
return _unique_file(
path, filename_pat=(lambda count: "%s-%04d.conf" % (filename, count)),
count=1, mode=mode)
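# Hypothetical illustration (not part of the original module): the first call
# for a lineage named "example.com" opens "<path>/example.com.conf"; if that
# file already exists, later candidates become "example.com-0001.conf",
# "example.com-0002.conf", and so on.
def _example_unique_lineage_name(config_dir):
    """Reserve a config file for an assumed lineage name and return its path."""
    conf_file, conf_path = unique_lineage_name(config_dir, "example.com")
    conf_file.close()
    return conf_path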
def safely_remove(path):
"""Remove a file that may not exist."""
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def get_os_info(filepath="/etc/os-release"):
"""
Get OS name and version
:param str filepath: File path of os-release file
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
if os.path.isfile(filepath):
# Systemd os-release parsing might be viable
os_name, os_version = get_systemd_os_info(filepath=filepath)
if os_name:
return (os_name, os_version)
# Fallback to platform module
return get_python_os_info()
def get_os_info_ua(filepath="/etc/os-release"):
"""
Get OS name and version string for User Agent
:param str filepath: File path of os-release file
:returns: os_ua
:rtype: `str`
"""
if os.path.isfile(filepath):
os_ua = _get_systemd_os_release_var("PRETTY_NAME", filepath=filepath)
if not os_ua:
os_ua = _get_systemd_os_release_var("NAME", filepath=filepath)
if os_ua:
return os_ua
# Fallback
return " ".join(get_python_os_info())
def get_systemd_os_info(filepath="/etc/os-release"):
"""
Parse systemd /etc/os-release for distribution information
:param str filepath: File path of os-release file
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
os_name = _get_systemd_os_release_var("ID", filepath=filepath)
os_version = _get_systemd_os_release_var("VERSION_ID", filepath=filepath)
return (os_name, os_version)
def get_systemd_os_like(filepath="/etc/os-release"):
"""
Get a list of strings that indicate the distribution likeness to
other distributions.
:param str filepath: File path of os-release file
:returns: List of distribution acronyms
:rtype: `list` of `str`
"""
return _get_systemd_os_release_var("ID_LIKE", filepath).split(" ")
def _get_systemd_os_release_var(varname, filepath="/etc/os-release"):
"""
Get single value from systemd /etc/os-release
:param str varname: Name of variable to fetch
:param str filepath: File path of os-release file
:returns: requested value
:rtype: `str`
"""
var_string = varname+"="
if not os.path.isfile(filepath):
return ""
with open(filepath, 'r') as fh:
contents = fh.readlines()
for line in contents:
if line.strip().startswith(var_string):
# Return the value of var, normalized
return _normalize_string(line.strip()[len(var_string):])
return ""
def _normalize_string(orig):
"""
Helper function for _get_systemd_os_release_var() to remove quotes
and whitespaces
"""
return orig.replace('"', '').replace("'", "").strip()
def get_python_os_info():
"""
Get Operating System type/distribution and major version
using python platform module
:returns: (os_name, os_version)
:rtype: `tuple` of `str`
"""
info = platform.system_alias(
platform.system(),
platform.release(),
platform.version()
)
os_type, os_ver, _ = info
os_type = os_type.lower()
if os_type.startswith('linux'):
info = platform.linux_distribution()
# On arch, platform.linux_distribution() is reportedly ('','',''),
# so handle it defensively
if info[0]:
os_type = info[0]
if info[1]:
os_ver = info[1]
elif os_type.startswith('darwin'):
os_ver = subprocess.Popen(
["sw_vers", "-productVersion"],
stdout=subprocess.PIPE
).communicate()[0].rstrip('\n')
elif os_type.startswith('freebsd'):
# eg "9.3-RC3-p1"
os_ver = os_ver.partition("-")[0]
os_ver = os_ver.partition(".")[0]
elif platform.win32_ver()[1]:
os_ver = platform.win32_ver()[1]
else:
# Cases known to fall here: Cygwin python
os_ver = ''
return os_type, os_ver
# Just make sure we don't get pwned... Make sure that it also doesn't
# start with a period or have two consecutive periods <- this needs to
# be done in addition to the regex
EMAIL_REGEX = re.compile("[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+$")
def safe_email(email):
"""Scrub email address before using it."""
if EMAIL_REGEX.match(email) is not None:
return not email.startswith(".") and ".." not in email
else:
logger.warning("Invalid email address: %s.", email)
return False
def add_deprecated_argument(add_argument, argument_name, nargs):
"""Adds a deprecated argument with the name argument_name.
Deprecated arguments are not shown in the help. If they are used on
the command line, a warning is shown stating that the argument is
deprecated and no other action is taken.
:param callable add_argument: Function that adds arguments to an
argument parser/group.
:param str argument_name: Name of deprecated argument.
:param nargs: Value for nargs when adding the argument to argparse.
"""
class ShowWarning(argparse.Action):
"""Action to log a warning when an argument is used."""
def __call__(self, unused1, unused2, unused3, option_string=None):
sys.stderr.write(
"Use of {0} is deprecated.\n".format(option_string))
configargparse.ACTION_TYPES_THAT_DONT_NEED_A_VALUE.add(ShowWarning)
add_argument(argument_name, action=ShowWarning,
help=argparse.SUPPRESS, nargs=nargs)
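# Hypothetical usage sketch (not part of the original module): register a
# deprecated "--old-flag" option so that using it only prints a warning and
# the flag stays hidden from --help output.
def _example_add_deprecated_argument():
    """Return a parser with an assumed deprecated flag attached."""
    parser = argparse.ArgumentParser()
    add_deprecated_argument(parser.add_argument, "--old-flag", nargs=0)
    return parser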
def enforce_le_validity(domain):
"""Checks that Let's Encrypt will consider domain to be valid.
:param str domain: FQDN to check
:type domain: `str` or `unicode`
:returns: The domain cast to `str`, with ASCII-only contents
:rtype: str
:raises ConfigurationError: for invalid domains and cases where Let's
Encrypt currently will not issue certificates
"""
domain = enforce_domain_sanity(domain)
if not re.match("^[A-Za-z0-9.-]*$", domain):
raise errors.ConfigurationError(
"{0} contains an invalid character. "
"Valid characters are A-Z, a-z, 0-9, ., and -.".format(domain))
labels = domain.split(".")
if len(labels) < 2:
raise errors.ConfigurationError(
"{0} needs at least two labels".format(domain))
for label in labels:
if label.startswith("-"):
raise errors.ConfigurationError(
'label "{0}" in domain "{1}" cannot start with "-"'.format(
label, domain))
if label.endswith("-"):
raise errors.ConfigurationError(
'label "{0}" in domain "{1}" cannot end with "-"'.format(
label, domain))
return domain
def enforce_domain_sanity(domain):
"""Method which validates domain value and errors out if
the requirements are not met.
:param domain: Domain to check
:type domain: `str` or `unicode`
:raises ConfigurationError: for invalid domains and cases where Let's
Encrypt currently will not issue certificates
:returns: The domain cast to `str`, with ASCII-only contents
:rtype: str
"""
if isinstance(domain, six.text_type):
wildcard_marker = u"*."
else:
wildcard_marker = b"*."
# Check if there's a wildcard domain
if domain.startswith(wildcard_marker):
raise errors.ConfigurationError(
"Wildcard domains are not supported: {0}".format(domain))
# Unicode
try:
if isinstance(domain, six.binary_type):
domain = domain.decode('utf-8')
domain.encode('ascii')
except UnicodeError:
error_fmt = (u"Internationalized domain names "
"are not presently supported: {0}")
if isinstance(domain, six.text_type):
raise errors.ConfigurationError(error_fmt.format(domain))
else:
raise errors.ConfigurationError(str(error_fmt).format(domain))
domain = domain.lower()
# Remove trailing dot
domain = domain[:-1] if domain.endswith(u'.') else domain
# Explain separately that IP addresses aren't allowed (apart from not
# being FQDNs) because hope springs eternal concerning this point
try:
socket.inet_aton(domain)
raise errors.ConfigurationError(
"Requested name {0} is an IP address. The Let's Encrypt "
"certificate authority will not issue certificates for a "
"bare IP address.".format(domain))
except socket.error:
# It wasn't an IP address, so that's good
pass
# FQDN checks according to RFC 2181: domain name should be less than 255
# octets (inclusive). And each label is 1 - 63 octets (inclusive).
# https://tools.ietf.org/html/rfc2181#section-11
msg = "Requested domain {0} is not a FQDN because ".format(domain)
labels = domain.split('.')
for l in labels:
if not 0 < len(l) < 64:
raise errors.ConfigurationError(msg + "label {0} is too long.".format(l))
if len(domain) > 255:
raise errors.ConfigurationError(msg + "it is too long.")
return domain
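# Hypothetical illustration (not part of the original module): "Example.COM."
# normalizes to "example.com", while a bare IP address or a single-label name
# raises errors.ConfigurationError.
def _example_enforce_le_validity():
    """Normalize an assumed mixed-case domain with a trailing dot."""
    return enforce_le_validity(u"Example.COM.")  # -> u"example.com"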
def get_strict_version(normalized):
"""Converts a normalized version to a strict version.
:param str normalized: normalized version string
:returns: An equivalent strict version
:rtype: distutils.version.StrictVersion
"""
# strict version ending with "a" and a number designates a pre-release
# pylint: disable=no-member
return distutils.version.StrictVersion(normalized.replace(".dev", "a"))
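# Hypothetical illustration (not part of the original module): a normalized
# development version such as "0.9.0.dev0" maps to the pre-release strict
# version "0.9.0a0".
def _example_get_strict_version():
    """Convert an assumed normalized dev version to a StrictVersion."""
    return get_strict_version("0.9.0.dev0")  # StrictVersion("0.9.0a0")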
|
|
# -*- coding: utf-8 -*-
#
# Luigi documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 8 00:56:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import datetime
import sphinx.environment
from docutils.utils import get_source_line
try:
import luigi
import luigi.parameter
def parameter_repr(self):
"""
When building documentation, we want Parameter objects to show their
description in a nice way
"""
significance = 'Insignificant ' if not self.significant else ''
class_name = self.__class__.__name__
has_default = self._default != luigi.parameter._no_value
default = ' (defaults to {})'.format(self._default) if has_default else ''
description = (': ' + self.description if self.description else '')
return significance + class_name + default + description
luigi.parameter.Parameter.__repr__ = parameter_repr
def assertIn(needle, haystack):
"""
We test repr of Parameter objects, since it'll be used for readthedocs
"""
assert needle in haystack
# TODO: find a better place to put this!
assertIn('IntParameter', repr(luigi.IntParameter()))
assertIn('defaults to 37', repr(luigi.IntParameter(default=37)))
assertIn('hi mom', repr(luigi.IntParameter(description='hi mom')))
assertIn('Insignificant BoolParameter', repr(luigi.BoolParameter(significant=False)))
except ImportError:
pass
def _warn_node(self, msg, node):
"""
Mute warnings that are like ``WARNING: nonlocal image URI found: https://img. ...``
Solution was found by googling, copied it from SO:
http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
"""
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
sphinx.environment.BuildEnvironment.warn_node = _warn_node
if os.environ.get('READTHEDOCS', None) == 'True':
# Run sphinx-apidoc automatically in readthedocs
# Taken from this: https://lists.torproject.org/pipermail/tor-commits/2012-September/046695.html
os.system('sphinx-apidoc -o api -T ../luigi --separate')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.pardir))
# append the __init__ to class definitions
autoclass_content = 'both'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Luigi'
copyright = u"2011-{}, Erik Bernhardsson and Elias Freider".format(datetime.datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from pkg_resources import get_distribution
__version__ = get_distribution('luigi').version # assume luigi is already installed
# The short X.Y version.
version = ".".join(__version__.split(".")[0:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'README.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
autodoc_default_flags = ['members', 'undoc-members']
autodoc_member_order = 'bysource'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
except ImportError:
raise Exception("You must `pip install sphinx_rtd_theme` to build docs locally.")
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'luigi.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Luigidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Luigi.tex', u'Luigi Documentation',
u'Erik Bernhardsson and Elias Freider', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'luigi', u'Luigi Documentation',
[u'Erik Bernhardsson and Elias Freider'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Luigi', u'Luigi Documentation',
u'Erik Bernhardsson and Elias Freider', 'Luigi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
'''
This reimplements tests from the aureusj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
class FullBlockTest(ComparisonTestFramework):
    ''' This test can either be run against one node with expected answers, or against two nodes whose results are compared.
        Change the "outcome" variable of each TestInstance object to only do the comparison. '''
def __init__(self):
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(bytes("horsebattery"))
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.block_time = int(time.time())+1
self.tip = None
self.blocks = {}
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
return block
    # Create a block on top of self.tip, and advance self.tip to point to the new block.
    # If spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
    # and the rest will go to fees.
def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
if self.tip == None:
base_block_hash = self.genesis_hash
else:
base_block_hash = self.tip.sha256
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
if (spend != None):
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, self.block_time)
if (spend != None):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), "", 0xffffffff)) # no signature yet
# This copies the java comparison tool testing behavior: the first
# txout has a garbage scriptPubKey, "to make sure we're not
# pre-verifying too much" (?)
tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
if script == None:
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
else:
tx.vout.append(CTxOut(1, script))
# Now sign it if necessary
scriptSig = ""
scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
scriptSig = CScript([OP_TRUE])
else:
# We have to actually sign it
(sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
tx.vin[0].scriptSig = scriptSig
# Now add the transaction to the block
block = self.add_transactions_to_block(block, [tx])
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
self.block_time += 1
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
        # get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# add transactions to a block produced by next_block
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
old_hash = block.sha256
self.add_transactions_to_block(block, new_transactions)
block.solve()
# Update the internal state just like in next_block
self.tip = block
self.block_heights[block.sha256] = self.block_heights[old_hash]
del self.block_heights[old_hash]
self.blocks[block_number] = block
return block
# creates a new block and advances the tip to that block
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(1000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
out0 = get_spendable_output()
block(1, spend=out0)
save_spendable_output()
yield accepted()
out1 = get_spendable_output()
b2 = block(2, spend=out1)
yield accepted()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out1)
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
out2 = get_spendable_output()
block(4, spend=out2)
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out2)
save_spendable_output()
yield rejected()
out3 = get_spendable_output()
block(6, spend=out3)
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out2)
yield rejected()
out4 = get_spendable_output()
block(8, spend=out4)
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, 'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out3)
yield rejected()
block(11, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, 'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out3)
save_spendable_output()
#yield TestInstance([[b12, False]])
b13 = block(13, spend=out4)
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
out5 = get_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out5, additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 / 50 - 1))
tip(13)
block(15, spend=out5, script=lots_of_checksigs)
yield accepted()
# Test that a block with too many checksigs is rejected
out6 = get_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 / 50))
block(16, spend=out6, script=too_many_checksigs)
yield rejected(RejectResult(16, 'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, 'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out6)
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
out7 = get_spendable_output()
block(20, spend=out7)
yield rejected(RejectResult(16, 'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out6)
yield rejected()
block(22, spend=out5)
yield rejected()
        # Create a block on either side of MAX_BLOCK_SIZE and make sure it's accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out6)
old_hash = b23.sha256
tx = CTransaction()
script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
script_output = CScript([chr(0)*script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
yield accepted()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out6)
script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
script_output = CScript([chr(0)*(script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
yield rejected(RejectResult(16, 'bad-blk-length'))
b25 = block(25, spend=out7)
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out6)
b26.vtx[0].vin[0].scriptSig = chr(0)
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, 'bad-cb-length'))
# Extend the b26 chain to make sure aureusd isn't accepting b26
b27 = block(27, spend=out7)
yield rejected()
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out6)
b28.vtx[0].vin[0].scriptSig = chr(0)*101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, 'bad-cb-length'))
        # Extend the b28 chain to make sure aureusd isn't accepting b28
b29 = block(29, spend=out7)
# TODO: Should get a reject message back with "bad-prevblk", except
# there's a bug that prevents this from being detected. Just note
# failure for now, and add the reject result later.
yield rejected()
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = chr(0)*100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
if __name__ == '__main__':
FullBlockTest().main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
with self.cached_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
equality_test(result, expected, rtol=1e-3)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(result[i], expected[i], rtol)
def testFloatOps(self):
for dtype in self.float_types:
if dtype == dtypes.bfloat16.as_numpy_dtype:
a = -1.01
b = 4.1
else:
a = -1.001
b = 4.01
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype))
self._testBinary(
math_ops.atan2,
np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
expected=np.array(
[0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
self._testBinary(
gen_math_ops.reciprocal_grad,
np.array([4, -3, -2, 1], dtype=dtype),
np.array([5, -6, 7, -8], dtype=dtype),
expected=np.array([-80, 54, -28, 8], dtype=dtype))
self._testBinary(
gen_math_ops.sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops.rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_math_ops.sqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
self._testBinary(
gen_nn_ops.softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
self._testBinary(
gen_nn_ops.softsign_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))
self._testBinary(
gen_math_ops.tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops.selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array(
[1.158099340847, 2.7161986816948, 4.67429802254,
4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
self._testBinary(
gen_nn_ops.relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops.relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
# TODO(b/68813416): Fails with bfloat16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
self._testBinary(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1.2]],
dtype=dtype),
np.array([2, 1, 7], dtype=np.int32),
expected=[
np.array([1.342536, 1.442536, np.nan], dtype=dtype),
np.array(
[[0.213838, 0.236328, -0.738817, 0.288651], [
0.213838, -0.763672, 0.261183, 0.288651
], [np.nan, np.nan, np.nan, np.nan]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
def testIntOps(self):
for dtype in self.int_types:
self._testBinary(
gen_math_ops.truncate_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_and,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_or,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_xor,
np.array([0b1, 0b111, 0b1100], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b010, 0b0101], dtype=dtype))
lhs = np.array([0, 5, 3, 14], dtype=dtype)
rhs = np.array([5, 0, 7, 11], dtype=dtype)
self._testBinary(
bitwise_ops.left_shift, lhs, rhs,
expected=np.left_shift(lhs, rhs))
self._testBinary(
bitwise_ops.right_shift, lhs, rhs,
expected=np.right_shift(lhs, rhs))
if dtype in [np.int8, np.int16, np.int32, np.int64]:
lhs = np.array([-1, -5, -3, -14, -2], dtype=dtype)
rhs = np.array([5, 0, 1, 11, 36], dtype=dtype)
# HLO has saturating shift behavior.
bits = np.ceil(
np.log(np.iinfo(dtype).max - np.iinfo(dtype).min) / np.log(2))
expected = [
np.right_shift(l, r) if r < bits else np.sign(l)
for l, r in zip(lhs, rhs)
]
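        # Worked example (added for clarity): for np.int32, bits == 32, so a
        # shift amount of 36 saturates and the expected entry for (-2, 36) is
        # np.sign(-2) == -1 rather than np.right_shift(-2, 36).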
self._testBinary(bitwise_ops.right_shift, lhs, rhs, expected=expected)
def testNumericOps(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([11, 22], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([6, 7], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[8], [9]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1, 2, 100], dtype=dtype),
np.array([10, 20, -1], dtype=dtype),
expected=np.array([-9, -18, 101], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
if dtype not in self.complex_types: # min/max not supported for complex
self._testBinary(
math_ops.maximum,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([10, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[10], [7]], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._testBinary(
math_ops.minimum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([1, 5], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[7], [2]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
# Complex support for squared_difference is incidental, see b/68205550
if dtype not in self.complex_types:
self._testBinary(
math_ops.squared_difference,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([81, 324], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([16, 9], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[36], [25]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[3, 1], [5, 3]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
if np.int64 in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36],
dtype=np.int64))
def testComplexOps(self):
for dtype in self.complex_types:
      # Map complex dtypes to their real component dtypes; np.complex128 is
      # included defensively in case it appears in self.complex_types.
      ctypes = {np.complex64: np.float32, np.complex128: np.float64}
self._testBinary(
math_ops.complex,
np.array([[[[-1, 2], [2, 0]]]], dtype=ctypes[dtype]),
np.array([[[[2, -3], [0, 4]]]], dtype=ctypes[dtype]),
expected=np.array([[[[-1 + 2j, 2 - 3j], [2, 4j]]]], dtype=dtype))
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array(
[[[[-1 + 2j, 2.00009999 - 3j], [2 - 3j, 3 + 4.01j]]]],
dtype=dtype),
np.array(
[[[[-1.001 + 2j, 2 - 3j], [2 - 3.00009j, 3 + 4j]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3j, -1.5j, -8, 2 + 3j, 2 + 4j], dtype=dtype),
np.array([2, -2, 7j, -4j, 4 - 6j, 1 + 2j], dtype=dtype),
expected=np.array(
[1.5, -1.5j, -0.2142857, -2j, (2 + 3j) / (4 - 6j), 2],
dtype=dtype))
# Test inf/nan scenarios.
self._testBinary(
gen_math_ops.real_div,
np.array([4 + 3j, 4, 3j, -4, -4j, 2 - 3j], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0], dtype=dtype),
expected=np.array(
[
dtype(1 + 1j) / 0,
dtype(1) / 0,
dtype(1j) / 0,
dtype(-1) / 0,
dtype(-1j) / 0,
dtype(1 - 1j) / 0
],
dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(3 + 2j),
dtype(4 - 5j),
expected=np.power(dtype(3 + 2j), dtype(4 - 5j)))
self._testBinary( # empty rhs
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary( # to zero power
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[1, 2], dtype=dtype),
expected=np.ones(shape=[1, 2], dtype=dtype))
lhs = np.array([1 - 2j, 4 + 3j, 2 - 3j, 3, 2j, 1, 4], dtype=dtype)
rhs = np.array([2, 3j, 3 + 4j, 2 + 3j, 3 - 2j, 2, 3 + 3j], dtype=dtype)
scalar = dtype(2 + 2j)
self._testBinary(math_ops.pow, lhs, rhs, expected=np.power(lhs, rhs))
self._testBinary(
math_ops.pow, scalar, rhs, expected=np.power(scalar, rhs))
self._testBinary(math_ops.pow, lhs, scalar, np.power(lhs, scalar))
lhs = np.array([4 + 2j, -3 - 1j, 2j, 1], dtype=dtype)
rhs = np.array([5, -6j, 7 - 3j, -8j], dtype=dtype)
self._testBinary(
gen_math_ops.reciprocal_grad, lhs, rhs, expected=-rhs * lhs * lhs)
self._testBinary(
gen_math_ops.sigmoid_grad, lhs, rhs, expected=rhs * lhs * (1 - lhs))
self._testBinary(
gen_math_ops.rsqrt_grad, lhs, rhs, expected=lhs**3 * rhs / -2)
self._testBinary(
gen_math_ops.sqrt_grad, lhs, rhs, expected=rhs / (2 * lhs))
self._testBinary(
gen_math_ops.tanh_grad, lhs, rhs, expected=rhs * (1 - lhs * lhs))
def testComplexMath(self):
for dtype in self.complex_types:
self._testBinary(
math_ops.add,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([11 - 1j, 22 + 24j], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([6 - 5j, 7 - 3j], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[8 + 3j], [9 + 6j]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([-9 + 7j, -18 - 10j], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([4 - 9j, 3 - 11j], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[-6 - 7j], [-5 - 4j]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array(
[(1 + 3j) * (10 - 4j), (2 + 7j) * (20 + 17j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array(
[(5 - 7j) * (1 + 2j), (5 - 7j) * (2 + 4j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array(
[[(7 + 5j) * (1 - 2j)], [(7 + 5j) * (2 + 1j)]], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([8 - 1j, 2 + 16j], dtype=dtype),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(8 - 1j) / (2 + 4j), (2 + 16j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(1 + 2j),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(1 + 2j) / (2 + 4j), (1 + 2j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([2 + 4j, 4 - 8j], dtype=dtype),
dtype(1 + 2j),
expected=np.array(
[(2 + 4j) / (1 + 2j), (4 - 8j) / (1 + 2j)], dtype=dtype))
# TODO(b/68205550): math_ops.squared_difference shouldn't be supported.
self._testBinary(
nn_ops.bias_add,
np.array([[1 + 2j, 2 + 7j], [3 - 5j, 4 + 2j]], dtype=dtype),
np.array([2 + 6j, -1 - 3j], dtype=dtype),
expected=np.array([[3 + 8j, 1 + 4j], [5 + 1j, 3 - 1j]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1 + 4j, 2 - 1j], [3 + 7j, 4]]]], dtype=dtype),
np.array([2 + 1j, -1 + 2j], dtype=dtype),
expected=np.array(
[[[[3 + 5j, 1 + 1j], [5 + 8j, 3 + 2j]]]], dtype=dtype))
def _testDivision(self, dtype):
"""Test cases for division operators."""
self._testBinary(
math_ops.div,
np.array([10, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 10], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(40),
np.array([2, 20], dtype=dtype),
expected=np.array([20, 2], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([[10], [4]], dtype=dtype),
dtype(2),
expected=np.array([[5], [2]], dtype=dtype))
if dtype not in self.complex_types: # floordiv unsupported for complex.
self._testBinary(
gen_math_ops.floor_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
def testIntDivision(self):
for dtype in self.int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types | self.complex_types:
self._testDivision(dtype)
def _testRemainder(self, dtype):
"""Test cases for remainder operators."""
self._testBinary(
gen_math_ops.floor_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, -1, 6, 0], dtype=dtype))
self._testBinary(
gen_math_ops.truncate_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, 1, -1, 0], dtype=dtype))
def testIntRemainder(self):
for dtype in self.int_types:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
def testComparisons(self):
self._testBinary(
math_ops.equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [False]], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [True]], dtype=np.bool))
for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
self._testBinary(
greater_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
greater_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
greater_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [False]], dtype=np.bool))
for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
self._testBinary(
greater_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [True], [False]], dtype=np.bool))
for less_op in [math_ops.less, (lambda x, y: x < y)]:
self._testBinary(
less_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
less_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
if np.int64 in self.numeric_types:
self._testBinary(
less_op,
np.array([[10], [7], [2], [-1]], dtype=np.int64),
np.int64(7),
expected=np.array(
[[False], [False], [True], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
less_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
less_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
less_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [True]], dtype=np.bool))
def testS64Comparisons(self):
for op in [(lambda x, y: x < y), (lambda x, y: x <= y),
(lambda x, y: x >= y), (lambda x, y: x > y)]:
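      # Note (added for clarity): many of these operand pairs agree in their
      # low 32 bits but differ in the high 32 bits, so a comparison that only
      # looked at the low half would give the wrong answer; this exercises
      # genuine 64-bit signed comparisons.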
lhs = np.array(
[
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000000),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x7ffffffefff00010),
np.int64(0x7ffffffefff00010),
np.int64(-1),
np.int64(-1)
],
dtype=np.int64)
rhs = np.array(
[
np.int64(0x000000007FFFFFFE),
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0001),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000001),
np.int64(0x0000000100000002),
np.int64(0x0000000100000003),
np.int64(0x0000000200000001),
np.int64(0x0000000200000002),
np.int64(0x0000000200000003),
np.int64(0x0000000300000001),
np.int64(0x0000000300000002),
np.int64(0x0000000300000003),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(-2),
np.int64(-1)
],
dtype=np.int64)
expected = np.array([op(l, r) for l, r in zip(lhs, rhs)], dtype=np.bool)
self._testBinary(op, lhs, rhs, expected=expected)
def testBroadcasting(self):
"""Tests broadcasting behavior of an operator."""
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array(3, dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([13, 23], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([10, 20], dtype=dtype),
np.array(4, dtype=dtype),
expected=np.array([14, 24], dtype=dtype))
# [1,3] x [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([[10, 20, 30]], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
# [3] * [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([10, 20, 30], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
def testFill(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.fill,
np.array([], dtype=np.int32),
dtype(-42),
expected=dtype(-42))
self._testBinary(
array_ops.fill,
np.array([1, 2], dtype=np.int32),
dtype(7),
expected=np.array([[7, 7]], dtype=dtype))
self._testBinary(
array_ops.fill,
np.array([3, 2], dtype=np.int32),
dtype(50),
expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
# Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
def _testMatMul(self, op):
for dtype in self.float_types:
self._testBinary(
op,
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
self._testBinary(
op,
np.array([[100, 10, 0.5]], dtype=dtype),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
expected=np.array([[123, 354]], dtype=dtype))
self._testBinary(
op,
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([[100], [10]], dtype=dtype),
expected=np.array([[130], [250], [680]], dtype=dtype))
self._testBinary(
op,
np.array([[1000, 100], [10, 1]], dtype=dtype),
np.array([[1, 2], [3, 4]], dtype=dtype),
expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
self._testBinary(
op,
np.array([], dtype=dtype).reshape((2, 0)),
np.array([], dtype=dtype).reshape((0, 3)),
expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul)
# TODO(phawkins): failing on GPU, no registered kernel.
def DISABLED_testSparseMatMul(self):
# Binary wrappers for sparse_matmul with different hints
def SparseMatmulWrapperTF(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True)
def SparseMatmulWrapperFT(a, b):
return math_ops.sparse_matmul(a, b, b_is_sparse=True)
def SparseMatmulWrapperTT(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)
self._testMatMul(math_ops.sparse_matmul)
self._testMatMul(SparseMatmulWrapperTF)
self._testMatMul(SparseMatmulWrapperFT)
self._testMatMul(SparseMatmulWrapperTT)
def testBatchMatMul(self):
# Same tests as for tf.matmul above.
self._testMatMul(math_ops.matmul)
# Tests with batches of matrices.
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]]], dtype=np.float32),
np.array([[[8]]], dtype=np.float32),
expected=np.array([[[-2]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]], [[4]]], dtype=np.float32),
np.array([[[8]], [[2]]], dtype=np.float32),
expected=np.array([[[-2]], [[8]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array(
[[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
[[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=np.float32),
np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
[[55, 66], [77, 88]]]],
dtype=np.float32),
expected=np.array(
[[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
[[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((2, 2, 0)),
np.array([], dtype=np.float32).reshape((2, 0, 3)),
expected=np.array(
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((0, 2, 4)),
np.array([], dtype=np.float32).reshape((0, 4, 3)),
expected=np.array([], dtype=np.float32).reshape(0, 2, 3))
# Regression test for b/31472796.
if hasattr(np, "matmul"):
x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
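      # Note (added for clarity): adjoint_b conjugate-transposes the last two
      # dimensions of the right-hand operand; because x is real here, that is
      # the same as the plain transpose([0, 1, 3, 2]) used for the expected
      # value.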
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x, x,
expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
def testExpandDims(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.expand_dims,
dtype(7),
np.int32(0),
expected=np.array([7], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([42], dtype=dtype),
np.int32(0),
expected=np.array([[42]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([], dtype=dtype),
np.int32(0),
expected=np.array([[]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(0),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(1),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(2),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(3),
expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.pad,
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[1, 2], [2, 1]], dtype=np.int32),
expected=np.array(
[[0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0],
[0, 0, 4, 5, 6, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
dtype=dtype))
self._testBinary(
lambda x, y: array_ops.pad(x, y, constant_values=7),
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[0, 3], [2, 1]], dtype=np.int32),
expected=np.array(
[[7, 7, 1, 2, 3, 7],
[7, 7, 4, 5, 6, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7]],
dtype=dtype))
def testSymmetricMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "SYMMETRIC")
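    # Note (added for clarity): "SYMMETRIC" mirror padding repeats the edge
    # rows/columns themselves, whereas the "REFLECT" mode exercised in
    # testReflectMirrorPad below mirrors about the edge without repeating it.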
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
2,
2,
], [3, 3]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
def testReflectMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
1,
1,
], [2, 2]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1], #
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9]
],
dtype=dtype),
np.array([[2, 2], [0, 0]], dtype=np.int32),
expected=np.array(
[
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype),
np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
expected=np.array(
[
[
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
],
[
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
]
],
dtype=dtype))
def testReshape(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.reshape,
np.array([], dtype=dtype),
np.array([0, 4], dtype=np.int32),
expected=np.zeros(shape=[0, 4], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 6], dtype=np.int32),
expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([6, -1], dtype=np.int32),
expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, -1], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
def testSplit(self):
for dtype in self.numeric_types:
for axis in [0, -3]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1], [2]]], dtype=dtype),
np.array([[[3], [4]]], dtype=dtype),
np.array([[[5], [6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
for axis in [1, -2]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1]], [[3]], [[5]]], dtype=dtype),
np.array([[[2]], [[4]], [[6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def splitvOp(x, y): # pylint: disable=invalid-name
return array_ops.split(value=y, num_or_size_splits=[2, 3], axis=x)
for axis in [1, -1]:
self._testBinary(
splitvOp,
np.int32(axis),
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=dtype),
expected=[
np.array([[0, 1], [5, 6]], dtype=dtype),
np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def testTile(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.tile,
np.array([[6], [3], [4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([6, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6, 3, 4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([2, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[6, 6]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1], [2]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[1, 1], [2, 2]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array(
[[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2]], dtype=dtype),
np.array([3, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[1, 2],
[1, 2]],
dtype=dtype))
def testTranspose(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1, 3], [2, 4]], dtype=dtype))
def testConjugateTranspose(self):
for dtype in self.complex_types:
self._testBinary(
array_ops.conjugate_transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1 + 1j, 2 - 2j], [3 + 3j, 4 - 4j]], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1 + 1j, 3 + 3j], [2 - 2j, 4 - 4j]], dtype=dtype))
def testCross(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops.cross,
np.zeros((4, 3), dtype=dtype),
np.zeros((4, 3), dtype=dtype),
expected=np.zeros((4, 3), dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype),
expected=np.array([-3, 6, -3], dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
def testBroadcastArgs(self):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 1, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 1, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Incompatible shapes"):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1, 2, 3], dtype=np.int32),
np.array([4, 5, 6], dtype=np.int32),
expected=None)
def testMatrixSetDiag(self):
for dtype in self.numeric_types:
# Square
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
dtype=dtype),
np.array([1.0, 2.0, 3.0], dtype=dtype),
expected=np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0], [1.0, 1.0, 3.0]],
dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0], [2.0, 0.0, 6.0]]],
dtype=dtype),
np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]], dtype=dtype),
expected=np.array(
[[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0], [1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0], [2.0, 0.0, -6.0]]],
dtype=dtype))
# Rectangular
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]], dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]], dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]], dtype=dtype),
np.array([[-1.0, -2.0], [-4.0, -5.0]],
dtype=dtype),
expected=np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]],
dtype=dtype))
def testBroadcastTo(self):
for dtype in self.all_types:
x = np.random.randint(0, high=100, size=[2, 3])
self._testBinary(
array_ops.broadcast_to,
x,
np.array([2, 3], dtype=np.int32),
expected=x)
self._testBinary(
array_ops.broadcast_to,
x,
np.array([6, 6], dtype=np.int32),
expected=np.tile(x, [3, 2]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 4, 3], dtype=np.int32),
expected=np.tile(x, [7, 2, 1]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 0, 3], dtype=np.int32),
expected=np.zeros([7, 0, 3], dtype=dtype))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 1, 2, 9], dtype=np.int32),
expected=np.tile(x, [7, 1, 1, 3]))
self._testBinary(
array_ops.broadcast_to,
np.zeros([2, 0], dtype=dtype),
np.array([4, 0], dtype=np.int32),
expected=np.zeros([4, 0], dtype=dtype))
if __name__ == "__main__":
googletest.main()
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from heat_integrationtests.functional import functional_base
test_template_one_resource = {
'heat_template_version': '2013-05-23',
'description': 'Test template to create one instance.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 1,
'action_wait_secs': {'create': 1},
'client_name': 'nova',
'entity_name': 'servers',
}
}
}
}
test_template_two_resource = {
'heat_template_version': '2013-05-23',
    'description': 'Test template to create two instances.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
'action_wait_secs': {'update': 1}
}
},
'test2': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0
}
}
}
}
def _change_rsrc_properties(template, rsrcs, values):
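    """Return a deep copy of template with selected properties overridden.
    For each resource name in rsrcs, any property that also appears in values
    is replaced with the new value; the input template itself is not mutated.
    """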
modified_template = copy.deepcopy(template)
for rsrc_name in rsrcs:
rsrc_prop = modified_template['resources'][
rsrc_name]['properties']
for prop in rsrc_prop:
if prop in values:
rsrc_prop[prop] = values[prop]
return modified_template
class CreateStackTest(functional_base.FunctionalTestsBase):
def setUp(self):
super(CreateStackTest, self).setUp()
def test_create_rollback(self):
values = {'fail': True, 'value': 'test_create_rollback'}
template = _change_rsrc_properties(test_template_one_resource,
['test1'], values)
self.stack_create(
template=template,
expected_status='ROLLBACK_COMPLETE',
disable_rollback=False)
class UpdateStackTest(functional_base.FunctionalTestsBase):
provider_template = {
'heat_template_version': '2013-05-23',
'description': 'foo',
'resources': {
'test1': {
'type': 'My::TestResource'
}
}
}
provider_group_template = '''
heat_template_version: 2013-05-23
parameters:
count:
type: number
default: 2
resources:
test_group:
type: OS::Heat::ResourceGroup
properties:
count: {get_param: count}
resource_def:
type: My::TestResource
'''
update_userdata_template = '''
heat_template_version: 2014-10-16
parameters:
flavor:
type: string
user_data:
type: string
image:
type: string
network:
type: string
resources:
server:
type: OS::Nova::Server
properties:
image: {get_param: image}
flavor: {get_param: flavor}
networks: [{network: {get_param: network} }]
user_data_format: SOFTWARE_CONFIG
user_data: {get_param: user_data}
'''
fail_param_template = '''
heat_template_version: 2014-10-16
parameters:
do_fail:
type: boolean
default: False
resources:
aresource:
type: OS::Heat::TestResource
properties:
value: Test
fail: {get_param: do_fail}
wait_secs: 1
'''
def setUp(self):
super(UpdateStackTest, self).setUp()
def test_stack_update_nochange(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
{'value': 'test_no_change'})
stack_identifier = self.stack_create(
template=template)
expected_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
# Update with no changes, resources should be unchanged
self.update_stack(stack_identifier, template)
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
def test_stack_in_place_update(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
{'value': 'test_in_place'})
stack_identifier = self.stack_create(
template=template)
expected_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
resource = self.client.resources.list(stack_identifier)
initial_phy_id = resource[0].physical_resource_id
tmpl_update = _change_rsrc_properties(
test_template_one_resource, ['test1'],
{'value': 'test_in_place_update'})
        # Update the value
self.update_stack(stack_identifier, tmpl_update)
resource = self.client.resources.list(stack_identifier)
        # By default the update happens in place, so the physical ID is unchanged
self.assertEqual(initial_phy_id,
resource[0].physical_resource_id)
def test_stack_update_replace(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
{'value': 'test_replace'})
stack_identifier = self.stack_create(
template=template)
expected_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
resource = self.client.resources.list(stack_identifier)
initial_phy_id = resource[0].physical_resource_id
# Update the value and also set update_replace prop
tmpl_update = _change_rsrc_properties(
test_template_one_resource, ['test1'],
{'value': 'test_in_place_update', 'update_replace': True})
self.update_stack(stack_identifier, tmpl_update)
resource = self.client.resources.list(stack_identifier)
        # update_replace forces a replacement, so the physical ID changes
self.assertNotEqual(initial_phy_id,
resource[0].physical_resource_id)
def test_stack_update_add_remove(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
{'value': 'test_add_remove'})
stack_identifier = self.stack_create(
template=template)
initial_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
tmpl_update = _change_rsrc_properties(
test_template_two_resource, ['test1', 'test2'],
{'value': 'test_add_remove_update'})
# Add one resource via a stack update
self.update_stack(stack_identifier, tmpl_update)
updated_resources = {'test1': 'OS::Heat::TestResource',
'test2': 'OS::Heat::TestResource'}
self.assertEqual(updated_resources,
self.list_resources(stack_identifier))
# Then remove it by updating with the original template
self.update_stack(stack_identifier, template)
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
def test_stack_update_rollback(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
{'value': 'test_update_rollback'})
stack_identifier = self.stack_create(
template=template)
initial_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
tmpl_update = _change_rsrc_properties(
test_template_two_resource, ['test1', 'test2'],
{'value': 'test_update_rollback', 'fail': True})
# stack update, also set failure
self.update_stack(stack_identifier, tmpl_update,
expected_status='ROLLBACK_COMPLETE',
disable_rollback=False)
        # Since the stack update failed, only the original resource is present
updated_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(updated_resources,
self.list_resources(stack_identifier))
def test_stack_update_from_failed(self):
# Prove it's possible to update from an UPDATE_FAILED state
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
{'value': 'test_update_failed'})
stack_identifier = self.stack_create(
template=template)
initial_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
tmpl_update = _change_rsrc_properties(
test_template_one_resource, ['test1'], {'fail': True})
        # Update with a template that makes the resource fail; expect UPDATE_FAILED
self.update_stack(stack_identifier, tmpl_update,
expected_status='UPDATE_FAILED')
# but then passing a good template should succeed
self.update_stack(stack_identifier, test_template_two_resource)
updated_resources = {'test1': 'OS::Heat::TestResource',
'test2': 'OS::Heat::TestResource'}
self.assertEqual(updated_resources,
self.list_resources(stack_identifier))
def test_stack_update_provider(self):
template = _change_rsrc_properties(
test_template_one_resource, ['test1'],
{'value': 'test_provider_template'})
files = {'provider.template': json.dumps(template)}
env = {'resource_registry':
{'My::TestResource': 'provider.template'}}
stack_identifier = self.stack_create(
template=self.provider_template,
files=files,
environment=env
)
initial_resources = {'test1': 'My::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Prove the resource is backed by a nested stack, save the ID
nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
'test1')
nested_id = nested_identifier.split('/')[-1]
# Then check the expected resources are in the nested stack
nested_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
tmpl_update = _change_rsrc_properties(
test_template_two_resource, ['test1', 'test2'],
{'value': 'test_provider_template'})
# Add one resource via a stack update by changing the nested stack
files['provider.template'] = json.dumps(tmpl_update)
self.update_stack(stack_identifier, self.provider_template,
environment=env, files=files)
# Parent resources should be unchanged and the nested stack
# should have been updated in-place without replacement
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
rsrc = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual(rsrc.physical_resource_id, nested_id)
# Then check the expected resources are in the nested stack
nested_resources = {'test1': 'OS::Heat::TestResource',
'test2': 'OS::Heat::TestResource'}
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
def test_stack_update_alias_type(self):
env = {'resource_registry':
{'My::TestResource': 'OS::Heat::RandomString',
'My::TestResource2': 'OS::Heat::RandomString'}}
stack_identifier = self.stack_create(
template=self.provider_template,
environment=env
)
p_res = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual('My::TestResource', p_res.resource_type)
initial_resources = {'test1': 'My::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
res = self.client.resources.get(stack_identifier, 'test1')
# Modify the type of the resource alias to My::TestResource2
tmpl_update = copy.deepcopy(self.provider_template)
tmpl_update['resources']['test1']['type'] = 'My::TestResource2'
self.update_stack(stack_identifier, tmpl_update, environment=env)
res_a = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual(res.physical_resource_id, res_a.physical_resource_id)
self.assertEqual(res.attributes['value'], res_a.attributes['value'])
def test_stack_update_alias_changes(self):
env = {'resource_registry':
{'My::TestResource': 'OS::Heat::RandomString'}}
stack_identifier = self.stack_create(
template=self.provider_template,
environment=env
)
p_res = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual('My::TestResource', p_res.resource_type)
initial_resources = {'test1': 'My::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
res = self.client.resources.get(stack_identifier, 'test1')
# Modify the resource alias to point to a different type
env = {'resource_registry':
{'My::TestResource': 'OS::Heat::TestResource'}}
self.update_stack(stack_identifier, template=self.provider_template,
environment=env)
res_a = self.client.resources.get(stack_identifier, 'test1')
self.assertNotEqual(res.physical_resource_id,
res_a.physical_resource_id)
def test_stack_update_provider_type(self):
template = _change_rsrc_properties(
test_template_one_resource, ['test1'],
{'value': 'test_provider_template'})
files = {'provider.template': json.dumps(template)}
env = {'resource_registry':
{'My::TestResource': 'provider.template',
'My::TestResource2': 'provider.template'}}
stack_identifier = self.stack_create(
template=self.provider_template,
files=files,
environment=env
)
p_res = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual('My::TestResource', p_res.resource_type)
initial_resources = {'test1': 'My::TestResource'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Prove the resource is backed by a nested stack, save the ID
nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
'test1')
nested_id = nested_identifier.split('/')[-1]
# Then check the expected resources are in the nested stack
nested_resources = {'test1': 'OS::Heat::TestResource'}
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
n_res = self.client.resources.get(nested_identifier, 'test1')
# Modify the type of the provider resource to My::TestResource2
tmpl_update = copy.deepcopy(self.provider_template)
tmpl_update['resources']['test1']['type'] = 'My::TestResource2'
self.update_stack(stack_identifier, tmpl_update,
environment=env, files=files)
p_res = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual('My::TestResource2', p_res.resource_type)
# Parent resources should be unchanged and the nested stack
# should have been updated in-place without replacement
self.assertEqual({u'test1': u'My::TestResource2'},
self.list_resources(stack_identifier))
rsrc = self.client.resources.get(stack_identifier, 'test1')
self.assertEqual(rsrc.physical_resource_id, nested_id)
# Then check the expected resources are in the nested stack
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
n_res2 = self.client.resources.get(nested_identifier, 'test1')
self.assertEqual(n_res.physical_resource_id,
n_res2.physical_resource_id)
def test_stack_update_provider_group(self):
"""Test two-level nested update."""
# Create a ResourceGroup (which creates a nested stack),
# containing provider resources (which create a nested
# stack), thus exercising an update which traverses
# two levels of nesting.
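        # Nesting exercised here (note added for clarity):
        #   parent stack -> test_group (OS::Heat::ResourceGroup nested stack)
        #                -> one provider.template nested stack per group member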
template = _change_rsrc_properties(
test_template_one_resource, ['test1'],
{'value': 'test_provider_group_template'})
files = {'provider.template': json.dumps(template)}
env = {'resource_registry':
{'My::TestResource': 'provider.template'}}
stack_identifier = self.stack_create(
template=self.provider_group_template,
files=files,
environment=env
)
initial_resources = {'test_group': 'OS::Heat::ResourceGroup'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Prove the resource is backed by a nested stack, save the ID
nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
'test_group')
# Then check the expected resources are in the nested stack
nested_resources = {'0': 'My::TestResource',
'1': 'My::TestResource'}
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
for n_rsrc in nested_resources:
rsrc = self.client.resources.get(nested_identifier, n_rsrc)
provider_stack = self.client.stacks.get(rsrc.physical_resource_id)
provider_identifier = '%s/%s' % (provider_stack.stack_name,
provider_stack.id)
provider_resources = {u'test1': u'OS::Heat::TestResource'}
self.assertEqual(provider_resources,
self.list_resources(provider_identifier))
tmpl_update = _change_rsrc_properties(
test_template_two_resource, ['test1', 'test2'],
{'value': 'test_provider_group_template'})
# Add one resource via a stack update by changing the nested stack
files['provider.template'] = json.dumps(tmpl_update)
self.update_stack(stack_identifier, self.provider_group_template,
environment=env, files=files)
# Parent resources should be unchanged and the nested stack
# should have been updated in-place without replacement
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Resource group stack should also be unchanged (but updated)
nested_stack = self.client.stacks.get(nested_identifier)
self.assertEqual('UPDATE_COMPLETE', nested_stack.stack_status)
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
for n_rsrc in nested_resources:
rsrc = self.client.resources.get(nested_identifier, n_rsrc)
provider_stack = self.client.stacks.get(rsrc.physical_resource_id)
provider_identifier = '%s/%s' % (provider_stack.stack_name,
provider_stack.id)
provider_resources = {'test1': 'OS::Heat::TestResource',
'test2': 'OS::Heat::TestResource'}
self.assertEqual(provider_resources,
self.list_resources(provider_identifier))
def test_stack_update_with_replacing_userdata(self):
"""Test case for updating userdata of instance.
Confirm that we can update userdata of instance during updating stack
by the user of member role.
Make sure that a resource that inherits from StackUser can be deleted
during updating stack.
"""
if not self.conf.minimal_image_ref:
raise self.skipException("No minimal image configured to test")
if not self.conf.minimal_instance_type:
raise self.skipException("No flavor configured to test")
parms = {'flavor': self.conf.minimal_instance_type,
'image': self.conf.minimal_image_ref,
'network': self.conf.fixed_network_name,
'user_data': ''}
stack_identifier = self.stack_create(
template=self.update_userdata_template,
parameters=parms
)
parms_updated = parms
parms_updated['user_data'] = 'two'
self.update_stack(
stack_identifier,
template=self.update_userdata_template,
parameters=parms_updated)
def test_stack_update_provider_group_patch(self):
'''Test two-level nested update with PATCH'''
template = _change_rsrc_properties(
test_template_one_resource, ['test1'],
{'value': 'test_provider_group_template'})
files = {'provider.template': json.dumps(template)}
env = {'resource_registry':
{'My::TestResource': 'provider.template'}}
stack_identifier = self.stack_create(
template=self.provider_group_template,
files=files,
environment=env
)
initial_resources = {'test_group': 'OS::Heat::ResourceGroup'}
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Prove the resource is backed by a nested stack, save the ID
nested_identifier = self.assert_resource_is_a_stack(stack_identifier,
'test_group')
# Then check the expected resources are in the nested stack
nested_resources = {'0': 'My::TestResource',
'1': 'My::TestResource'}
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
        # Increase the count, passing only the parameter (no environment or template)
params = {'count': 3}
self.update_stack(stack_identifier, parameters=params, existing=True)
# Parent resources should be unchanged and the nested stack
# should have been updated in-place without replacement
self.assertEqual(initial_resources,
self.list_resources(stack_identifier))
# Resource group stack should also be unchanged (but updated)
nested_stack = self.client.stacks.get(nested_identifier)
self.assertEqual('UPDATE_COMPLETE', nested_stack.stack_status)
        # The count was increased to 3, so one more group member should now exist
nested_resources['2'] = 'My::TestResource'
self.assertEqual(nested_resources,
self.list_resources(nested_identifier))
def test_stack_update_from_failed_patch(self):
'''Test PATCH update from a failed state.'''
# Start with empty template
stack_identifier = self.stack_create(
template='heat_template_version: 2014-10-16')
# Update with a good template, but bad parameter
self.update_stack(stack_identifier,
template=self.fail_param_template,
parameters={'do_fail': True},
expected_status='UPDATE_FAILED')
# PATCH update, only providing the parameter
self.update_stack(stack_identifier,
parameters={'do_fail': False},
existing=True)
self.assertEqual({u'aresource': u'OS::Heat::TestResource'},
self.list_resources(stack_identifier))
def test_stack_update_with_new_env(self):
"""Update handles new resource types in the environment.
If a resource type appears during an update and the update fails,
retrying the update is able to find the type properly in the
environment.
"""
stack_identifier = self.stack_create(
template=test_template_one_resource)
        # Update with a new resource and make the update fail
template = _change_rsrc_properties(test_template_one_resource,
['test1'], {'fail': True})
template['resources']['test2'] = {'type': 'My::TestResource'}
template['resources']['test1']['depends_on'] = 'test2'
env = {'resource_registry':
{'My::TestResource': 'OS::Heat::TestResource'}}
self.update_stack(stack_identifier,
template=template,
environment=env,
expected_status='UPDATE_FAILED')
# Fixing the template should fix the stack
template = _change_rsrc_properties(template,
['test1'], {'fail': False})
self.update_stack(stack_identifier,
template=template,
environment=env)
self.assertEqual({'test1': 'OS::Heat::TestResource',
'test2': 'My::TestResource'},
self.list_resources(stack_identifier))
# -*- coding: utf-8 -*-
from gluon import *
from gluon.storage import Storage
from s3 import *
from s3theme import NAV, SECTION
# =============================================================================
class S3MainMenuLayout(S3NavigationItem):
""" Custom Main Menu Layout """
@staticmethod
def layout(item):
""" Custom Layout Method """
# Manage flags: hide any disabled/unauthorized items
if not item.authorized:
item.enabled = False
item.visible = False
elif item.enabled is None or item.enabled:
item.enabled = True
item.visible = True
if item.enabled and item.visible:
items = item.render_components()
if item.parent is not None:
classes = []
if item.parent.parent is None:
# Item at the top-level?
toplevel = True
if item.opts.right:
classes.append("menu-right")
else:
toplevel = False
if item.components:
classes.append("has-dropdown not-click")
if item.selected:
classes.append("active")
_class = " ".join(classes)
# Menu item with Dropdown
if item.get_first(enabled=True):
_href = item.url()
return LI(A(item.label,
_href=_href,
_id=item.attr._id
),
UL(items,
_class="dropdown"
),
_class=_class,
)
else:
# Menu item without Drop-Down
if toplevel:
item_url = item.url()
if item_url == URL(c="default", f="index"):
classes.append("menu-home")
if item.selected:
classes.append("active")
_class = " ".join(classes)
return LI(A(item.label,
_href=item_url,
_id=item.attr._id,
),
_class=_class,
)
else:
# Submenu item
if isinstance(item.label, dict):
if "name" in item.label:
label = item.label["name"]
else:
return None
else:
label = item.label
link = A(label, _href=item.url(), _id=item.attr._id)
return LI(link)
else:
# Main menu
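                # Split the rendered top-level entries into left/right groups:
                # items carrying the "menu-right" class go into the right-hand
                # UL (reversed, presumably to preserve their declared order in
                # the Foundation top-bar), and the two groups are swapped for
                # right-to-left locales.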
right = []
left = []
for item in items:
if "menu-right" in item["_class"]:
item.remove_class("menu-right")
right.append(item)
else:
left.append(item)
right.reverse()
if current.response.s3.rtl:
right, left = left, right
return NAV(UL(LI(A(" ",
_href=URL(c="default", f="index"),
),
_class="name"
),
LI(A(SPAN(current.T("Menu"))),
_class="toggle-topbar menu-icon",
),
_class="title-area",
),
SECTION(UL(right, _class="right"),
UL(left, _class="left"),
_class="top-bar-section",
),
_class = "top-bar",
data = {"topbar": " "},
)
else:
return None
# ---------------------------------------------------------------------
@staticmethod
def checkbox_item(item):
""" Render special active items """
name = item.label
link = item.url()
_id = name["id"]
if "name" in name:
_name = name["name"]
else:
_name = ""
if "value" in name:
_value = name["value"]
else:
_value = False
if "request_type" in name:
_request_type = name["request_type"]
else:
_request_type = "ajax"
if link:
if _request_type == "ajax":
_onchange='''var val=$('#%s:checked').length;$.getS3('%s'+'?val='+val,null,false,null,false,false)''' % \
(_id, link)
else:
# Just load the page. Use this if the changed menu
# item should alter the contents of the page, and
# it's simpler just to load it.
_onchange="location.href='%s'" % link
else:
_onchange=None
return LI(A(INPUT(_type="checkbox",
_id=_id,
_onchange=_onchange,
value=_value,
),
"%s" % _name,
_nowrap="nowrap",
),
_class="menu-toggle",
)
# =============================================================================
class S3PersonalMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return TAG["ul"](items, _class="sub-nav personal-menu")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return TAG["li"](A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MP = S3PersonalMenuLayout
# =============================================================================
class S3AboutMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
if item.parent is None:
# The menu
items = item.render_components()
if items:
return TAG["ul"](items, _class="sub-nav about-menu left")
else:
return "" # menu is empty
else:
# A menu item
if item.enabled and item.authorized:
return TAG["li"](A(item.label, _href=item.url()))
else:
return None
# -----------------------------------------------------------------------------
# Shortcut
MA = S3AboutMenuLayout
# =============================================================================
class S3LanguageMenuLayout(S3NavigationItem):
@staticmethod
def layout(item):
""" Language menu layout
options for each entry:
- lang_code: the language code
- lang_name: the language name
option for the menu
- current_language: code of the current language
"""
if item.enabled:
if item.components:
# The language menu itself
current_language = current.T.accepted_language
items = item.render_components()
select = SELECT(items, value=current_language,
_name="_language",
# @ToDo T:
_title="Language Selection",
_onchange="S3.reloadWithQueryStringVars({'_language':$(this).val()});")
form = FORM(select, _class="language-selector",
_name="_language",
_action="",
_method="get")
return form
else:
# A language entry
return OPTION(item.opts.lang_name,
_value=item.opts.lang_code)
else:
return None
# -------------------------------------------------------------------------
def check_enabled(self):
""" Check whether the language menu is enabled """
if current.deployment_settings.get_L10n_display_toolbar():
return True
else:
return False
# -----------------------------------------------------------------------------
# Shortcut
ML = S3LanguageMenuLayout
# =============================================================================
class S3OrgMenuLayout(S3NavigationItem):
""" Layout for the organisation-specific menu """
@staticmethod
def layout(item):
name = "Deutsches Rotes Kreuz"
logo = IMG(_src = "/%s/static/themes/DRK/img/logo_small.png" %
current.request.application,
_alt = "Deutsches Rotes Kreuz",
_width=40,
)
# Note: render using current.menu.org.render()[0] + current.menu.org.render()[1]
return (name, logo)
# -----------------------------------------------------------------------------
# Shortcut
OM = S3OrgMenuLayout
# END =========================================================================
|
|
# -*- coding: utf-8 -*-
"""Operations run inside the report directory to extract data.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pkjson
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdexc, pkdc, pkdlog
from sirepo import job
from sirepo import simulation_db
from sirepo.template import template_common
import requests
import sirepo.template
import sirepo.util
import subprocess
import sys
import time
def default_command(in_file):
"""Reads `in_file` passes to `msg.jobCmd`
Must be called in run_dir
Writes its output on stdout.
Args:
in_file (str): json parsed to msg
Returns:
str: json output of command, e.g. status msg
"""
try:
job.init()
f = pkio.py_path(in_file)
msg = pkjson.load_any(f)
#TODO(e-carlin): find common place to serialize/deserialize paths
msg.runDir = pkio.py_path(msg.runDir)
f.remove()
res = globals()['_do_' + msg.jobCmd](
msg,
sirepo.template.import_module(msg.simulationType)
)
if res is None:
return
r = PKDict(res).pksetdefault(state=job.COMPLETED)
except Exception as e:
r = PKDict(
state=job.ERROR,
error=e.sr_args.error if isinstance(e, sirepo.util.UserAlert) else str(e),
stack=pkdexc(),
)
return pkjson.dump_pretty(r, pretty=False)
def _background_percent_complete(msg, template, is_running):
# some templates return a dict so wrap in PKDict
r = PKDict(template.background_percent_complete(
msg.data.report,
msg.runDir,
is_running,
))
#TODO(robnagler) this is incorrect, because we want to see file updates
# not just our polling frequency
r.pksetdefault(lastUpdateTime=lambda: _mtime_or_now(msg.runDir))
r.pksetdefault(frameCount=0)
r.pksetdefault(percentComplete=0.0)
return r
def _do_cancel(msg, template):
if hasattr(template, 'remove_last_frame'):
template.remove_last_frame(msg.runDir)
return PKDict()
def _do_compute(msg, template):
msg.runDir = pkio.py_path(msg.runDir)
with msg.runDir.join(template_common.RUN_LOG).open('w') as run_log:
p = subprocess.Popen(
_do_prepare_simulation(msg, template).cmd,
stdout=run_log,
stderr=run_log,
)
while True:
for j in range(20):
time.sleep(.1)
r = p.poll()
i = r is None
if not i:
break
if msg.isParallel:
# TODO(e-carlin): This has a potential to fail. We likely
# don't want the job to fail in this case
_write_parallel_status(msg, template, i)
if i:
continue
if r != 0:
                return PKDict(state=job.ERROR, error='non-zero returncode={}'.format(r))
return PKDict(state=job.COMPLETED)
def _do_fastcgi(msg, template):
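    # Long-running worker loop: connect to the Unix-domain socket named by
    # msg.fastcgiFile, then repeatedly read a JSON job message, dispatch it
    # to the matching _do_* handler inside its run directory, and send the
    # JSON-encoded result back on the same socket.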
import socket
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# relative file name (see job_agent.fastcgi_op)
s.connect(msg.fastcgiFile)
while True:
try:
m = pkjson.load_any(s.recv(int(1e8)))
m.runDir = pkio.py_path(m.runDir)
with pkio.save_chdir(m.runDir):
r = globals()['_do_' + m.jobCmd](
m,
sirepo.template.import_module(m.simulationType)
)
r = PKDict(r).pksetdefault(state=job.COMPLETED)
except Exception as e:
r = PKDict(
state=job.ERROR,
error=e.sr_args.error if isinstance(e, sirepo.util.UserAlert) else str(e),
stack=pkdexc(),
)
s.sendall(pkjson.dump_bytes(r) + b'\n')
def _do_get_simulation_frame(msg, template):
return template_common.sim_frame_dispatch(
msg.data.copy().pkupdate(run_dir=msg.runDir),
)
def _do_get_data_file(msg, template):
try:
f, c, _ = template.get_data_file(
msg.runDir,
msg.analysisModel,
msg.frame,
options=PKDict(suffix=msg.suffix),
)
requests.put(
msg.dataFileUri + f,
data=c,
verify=job.cfg.verify_tls,
).raise_for_status()
return PKDict()
except Exception as e:
return PKDict(state=job.ERROR, error=e, stack=pkdexc())
def _do_prepare_simulation(msg, template):
if 'libFileList' in msg:
msg.data.libFileList = msg.libFileList
return PKDict(
cmd=simulation_db.prepare_simulation(
msg.data,
run_dir=msg.runDir,
)[0],
)
def _do_sbatch_status(msg, template):
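    # Polls until the stop sentinel file appears: while it is absent, keep
    # writing a RUNNING parallel-status record and sleep; once present,
    # return None if the sentinel does not record COMPLETED (stopped due to
    # an error or cancellation), otherwise write a final status, remove the
    # sentinel and report COMPLETED.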
s = pkio.py_path(msg.stopSentinel)
while True:
if s.exists():
if job.COMPLETED not in s.read():
# told to stop for an error or otherwise
return None
_write_parallel_status(msg, template, False)
pkio.unchecked_remove(s)
return PKDict(state=job.COMPLETED)
_write_parallel_status(msg, template, True)
time.sleep(msg.nextRequestSeconds)
# DOES NOT RETURN
def _do_sequential_result(msg, template):
r = simulation_db.read_result(msg.runDir)
# Read this first: https://github.com/radiasoft/sirepo/issues/2007
if (r.state != job.ERROR and hasattr(template, 'prepare_output_file')
and 'models' in msg.data
):
template.prepare_output_file(msg.runDir, msg.data)
r = simulation_db.read_result(msg.runDir)
return r
def _mtime_or_now(path):
"""mtime for path if exists else time.time()
Args:
path (py.path):
Returns:
int: modification time
"""
return int(path.mtime() if path.exists() else time.time())
def _write_parallel_status(msg, template, is_running):
sys.stdout.write(
pkjson.dump_pretty(
PKDict(
state=job.RUNNING,
parallelStatus=_background_percent_complete(msg, template, is_running),
),
pretty=False,
) + '\n',
)
|
|
import numpy as np
import itertools as it
import gurobipy as gurobi
from gurobipy import GRB as G
from textwrap import dedent
from . import get_logger, freeze, subdict
_LOG = get_logger('adt17')
_STATUS = {
1: 'LOADED',
2: 'OPTIMAL',
3: 'INFEASIBLE',
4: 'INF_OR_UNBD',
5: 'UNBOUNDED',
6: 'CUTOFF',
7: 'ITERATION_LIMIT',
8: 'NODE_LIMIT',
9: 'TIME_LIMIT',
10: 'SOLUTION_LIMIT',
11: 'INTERRUPTED',
12: 'NUMERIC',
13: 'SUBOPTIMAL',
}
def dict2array(d):
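    # Collects a dict of Gurobi variables keyed by index tuples into a dense
    # numpy array of their solution values (the .x attribute); returns None
    # when the dict is empty.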
indices = np.array(list(d.keys()))
if not len(indices):
return None
ndim = len(indices[0])
shape = [indices[:, dim].max() + 1 for dim in range(ndim)]
array = np.zeros(shape)
for index in map(tuple, indices):
array[index] = d[index].x
return array
class Problem(object):
def __init__(self, num_attributes, num_threads=0):
self.num_attributes = num_attributes
self.num_threads = num_threads
def infer(self, w, transform=(1, 0)):
"""Computes a highest-utility configuration w.r.t. the given weights.
Parameters
----------
w : ndarray of shape (num_attributes,)
A weight vector.
transform : tuple of (float, 1D ndarray)
The transformation parameters (a, b).
Returns
-------
x : ndarray of shape (num_attributes,)
An optimal configuration.
"""
a, b = transform
transformed_w = a * w + b
assert (transformed_w >= 0).all()
_LOG.debug(dedent('''
INFERENCE
w = {}
transformed w = {}
''').format(w, transformed_w))
model = gurobi.Model('inference')
model.params.OutputFlag = 0
model.params.Threads = self.num_threads
model.params.Seed = 0
x = [model.addVar(vtype=G.BINARY) for z in range(self.num_attributes)]
model.modelSense = G.MAXIMIZE
model.setObjective(gurobi.quicksum([w[i] * x[i] for i in range(self.num_attributes)]))
self._add_constraints(model, x)
model.optimize()
x = np.array([x[z].x for z in range(self.num_attributes)])
_LOG.debug('inferred {}'.format(x))
return x
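    # Minimal usage sketch (hypothetical): subclasses are expected to supply
    # `_add_constraints(model, x)` with the hard feasibility constraints over
    # the binary attribute variables, e.g.
    #   class UnconstrainedProblem(Problem):
    #       def _add_constraints(self, model, x):
    #           pass  # every configuration in {0,1}^n is feasible
    #   x_star = UnconstrainedProblem(num_attributes=3).infer(np.ones(3))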
def select_query(self, dataset, set_size, alpha, transform=(1, 0)):
"""Solves the set-wise max-margin problem for a given set size.
Parameters
----------
dataset : ndarray of shape (num_examples, num_attributes)
Array of collected feedback of the form $x^+ - x^-$.
set_size : int
Size of the query set.
alpha : tuple of float
Hyperparameters
transform : tuple of (float, 1D ndarray)
The transformation parameters (a, b).
Returns
-------
w : ndarray of shape (set_size, num_attributes)
Optimal weight vectors.
x : ndarray of shape (set_size, num_attributes)
Optimal configurations.
"""
w_min = np.zeros(self.num_attributes)
w_max = np.ones(self.num_attributes)
a, b = transform
w_min = a * w_min + b
w_max = a * w_max + b
assert (w_min >= 0).all() and (w_max >= 0).all()
w_top = w_max.max()
_LOG.debug(dedent('''\
SELECTING QUERY SET k={set_size} alpha={alpha}
dataset =
{dataset}
transform = {transform}
w_min = {w_min}
w_max = {w_max}
w_top = {w_top}
''').format(**subdict(locals(), nokeys=['self'])))
model = gurobi.Model('queryselection')
model.params.OutputFlag = 0
model.params.Threads = self.num_threads
model.params.Seed = 0
x = {(i, z): model.addVar(vtype=G.BINARY, name='x_{}_{}'.format(i, z))
for i in range(set_size) for z in range(self.num_attributes)}
w = {(i, z): model.addVar(lb=0, vtype=G.CONTINUOUS, name='w_{}_{}'.format(i, z))
for i in range(set_size) for z in range(self.num_attributes)}
p = {(i, j, z): model.addVar(lb=0, vtype=G.CONTINUOUS, name='p_{}_{}_{}'.format(i, j, z))
for i, j, z in it.product(range(set_size), range(set_size), range(self.num_attributes))}
slacks = {(i, s): model.addVar(lb=0, vtype=G.CONTINUOUS, name='slack_{}_{}'.format(i, s))
for i in range(set_size) for s in range(len(dataset))}
margin = model.addVar(vtype=G.CONTINUOUS, name='margin')
p_diag = [p[i,i,z] for i in range(set_size) for z in range(self.num_attributes)]
obj_slacks = 0
if len(slacks) > 0:
obj_slacks = gurobi.quicksum(slacks.values())
# eq 4
model.modelSense = G.MAXIMIZE
model.setObjective(margin
- alpha[0] * obj_slacks
- alpha[1] * gurobi.quicksum(w.values())
+ alpha[2] * gurobi.quicksum(p_diag))
# eq 5
for i in range(set_size):
for s, delta in enumerate(dataset):
udiff = gurobi.quicksum([w[i,z] * delta[z] for z in range(self.num_attributes)])
model.addConstr(udiff >= margin - slacks[i,s])
# eq 6
for i, j in it.product(range(set_size), repeat=2):
if i != j:
udiff = gurobi.quicksum([p[i,i,z] - p[i,j,z] for z in range(self.num_attributes)])
model.addConstr(udiff >= margin)
# eq 7
for i, z in it.product(range(set_size), range(self.num_attributes)):
model.addConstr(p[i,i,z] <= (w_top * x[i,z]))
for i, z in it.product(range(set_size), range(self.num_attributes)):
model.addConstr(p[i,i,z] <= w[i,z])
# eq 8
for i, j in it.product(range(set_size), repeat=2):
if i != j:
for z in range(self.num_attributes):
model.addConstr(p[i,j,z] >= (w[i,z] - 2 * w_top * (1 - x[j,z])))
# eq 9a
for i in range(set_size):
for z in range(self.num_attributes):
model.addConstr(w[i,z] >= w_min[z])
model.addConstr(w[i,z] <= w_max[z])
# work around unbounded problems
apply_workaround = set_size == 1 and len(dataset) == 0
if apply_workaround:
model.addConstr(margin == 0)
# add hard constraints
for i in range(set_size):
self._add_constraints(model, [x[i,z] for z in range(self.num_attributes)])
try:
model.optimize()
model.objVal
except gurobi.GurobiError:
status = _STATUS[model.status]
msg = dedent('''\
unsatisfiable, reason: {status}
set_size = {set_size}
alpha = {alpha}
dataset =
{dataset}
transform = {transform}
''').format(**locals())
model.write('failed.lp')
raise RuntimeError(msg)
x = dict2array(x)
w = dict2array(w)
p = dict2array(p)
slacks = dict2array(slacks)
margin = margin.x
_LOG.debug(dedent('''\
SELECTED QUERY SET:
utilities
w =
{w}
x =
{x}
p =
{p}
slacks =
{slacks}
margin = {margin}
''').format(**locals()))
if not apply_workaround and (w == 0).all():
_LOG.warning('all-zero weights are bad')
return w, x
|
|
from __future__ import print_function
from collections import namedtuple
from fileinput import input
from operator import itemgetter
Coord = namedtuple('Coord', 'y x')
Delta = namedtuple('Delta', 'y x')
Cell = namedtuple('Cell', 'y x value')
Quadrant = namedtuple('Quadrant', 'y_start y_end x_start x_end')
MOVE_LEFT = Delta(0, -1)
MOVE_RIGHT = Delta(0, 1)
MOVE_UP = Delta(-1, 0)
MOVE_DOWN = Delta(1, 0)
MOVE_DES = {
MOVE_DOWN: 'DOWN',
MOVE_LEFT: 'LEFT',
MOVE_UP: 'UP',
MOVE_RIGHT: 'RIGHT',
}
def debug(*args, **kwargs):
import sys
kwargs.setdefault('file', sys.stderr)
print(*args, **kwargs)
class Board(object):
_state = None
def __init__(self, grid_size, state):
self.grid_size = grid_size
self.state = state
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = [[char for char in line] for line in value.split('\n') if line]
def iter_state(self):
return (Cell(y, x, char) for y, row in enumerate(self.state) for x, char in enumerate(row))
def find(self, needle):
if isinstance(needle, Cell):
return needle
if isinstance(needle, Coord):
return Cell(needle.y, needle.x, self.state[needle.y][needle.x])
for cell in self.iter_state():
if cell.value == needle:
return cell
return None
def findall(self, value):
return [cell for cell in self.iter_state() if cell.value == value]
def set_cell(self, needle, value):
needle_cell = self.find(needle)
self.state[needle_cell.y][needle_cell.x] = value
def is_valid_coord(self, coord):
if not isinstance(coord, Coord):
return False
for axis in (coord.x, coord.y):
if axis < 0 or axis >= self.grid_size:
return False
return True
def resolve_delta(self, delta, ref):
if not isinstance(delta, Delta):
return delta
ref_cell = self.find(ref)
coord = Coord(ref_cell.y + delta.y, ref_cell.x + delta.x)
if not self.is_valid_coord(coord):
raise InvalidMove()
return coord
def find_delta(self, char, target):
char_cell, target_cell = self.find(char), self.find(target)
if char_cell is None or target_cell is None:
raise NoValidMove()
delta_y = target_cell.y - char_cell.y
delta_x = target_cell.x - char_cell.x
return Delta(delta_y, delta_x)
def move(self, a, b):
cell_a = self.find(a)
cell_b = self.find(self.resolve_delta(b, a))
self.set_cell(cell_a, '-')
self.set_cell(cell_b, cell_a.value)
def pformat(self):
return '\n'.join(''.join(row) for row in self.state if row)
class Bot(object):
def __init__(self, board=None, char='b'):
self.board = board
self._char = char
@property
def char(self):
return self.board.find(self._char)
@char.setter
def char(self, value):
self._char = value
def get_proximity(self, target):
char_cell = self.char
target_cell = self.board.find(target)
delta = self.board.find_delta(char_cell, target_cell)
return abs(delta.x) + abs(delta.y)
def choose_target(self, targets):
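        # Heuristic ranking of candidate cells: the base score is Manhattan
        # distance * 100 ("priority_inverse"); it is then divided by a bonus
        # for cells in quadrants adjacent to empty quadrants and quadrants
        # shared with the bot, and reduced slightly for board corners. The
        # candidate with the lowest resulting score (ties broken by
        # proximity) wins.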
targets = [{'cell': target, 'proximity': self.get_proximity(target)} for target in targets]
for target in targets:
target['priority_inverse'] = target['proximity'] * 100
target['quadrants'] = []
grid_size = self.board.grid_size - 1
quadrant_size = grid_size // 2
quadrant_boxes = {
'q1': Quadrant(0, quadrant_size, 0, quadrant_size),
'q2': Quadrant(0, quadrant_size, quadrant_size, grid_size),
'q3': Quadrant(quadrant_size, grid_size, quadrant_size, grid_size),
'q4': Quadrant(quadrant_size, grid_size, 0, quadrant_size),
}
targets_by_quadrant = {
'q1': [],
'q2': [],
'q3': [],
'q4': [],
}
for quadrant_name, quadrant_box in quadrant_boxes.items():
for target in targets:
cell = target['cell']
if cell.y < quadrant_box.y_start or cell.y > quadrant_box.y_end:
continue
if cell.x < quadrant_box.x_start or cell.x > quadrant_box.x_end:
continue
target['quadrants'].append(quadrant_name)
targets_by_quadrant[quadrant_name].append(target)
char_cell = self.char
char_quadrants = []
for quadrant_name, quadrant_box in quadrant_boxes.items():
for target in [char_cell]:
cell = target
if cell.y < quadrant_box.y_start or cell.y > quadrant_box.y_end:
continue
if cell.x < quadrant_box.x_start or cell.x > quadrant_box.x_end:
continue
char_quadrants.append(quadrant_name)
for target in targets:
quadrant_priority = 1
quadrant_coefficient = 10
            matching_coefficient = 1
corner_coefficient = 1
if not targets_by_quadrant['q1']:
if 'q2' in target['quadrants'] or 'q4' in target['quadrants']:
quadrant_priority = quadrant_coefficient
if not targets_by_quadrant['q2']:
if 'q3' in target['quadrants'] or 'q1' in target['quadrants']:
quadrant_priority = quadrant_coefficient
if not targets_by_quadrant['q3']:
if 'q4' in target['quadrants'] or 'q2' in target['quadrants']:
quadrant_priority = quadrant_coefficient
if not targets_by_quadrant['q4']:
if 'q1' in target['quadrants'] or 'q3' in target['quadrants']:
quadrant_priority = quadrant_coefficient
matching_quadrants = 0
for target_quadrant in target['quadrants']:
if target_quadrant in char_quadrants:
matching_quadrants += 1
corner_modifier = 0
corner_coords = [Coord(0, 0), Coord(grid_size, 0), Coord(grid_size, grid_size), Coord(0, grid_size)]
            target_coord = Coord(target['cell'].y, target['cell'].x)
if target_coord in corner_coords:
corner_modifier = 1 * corner_coefficient
            matching_quadrant_priority_multiplier = matching_quadrants * matching_coefficient + 1
target['matching_quadrant_priority_multiplier'] = matching_quadrant_priority_multiplier
quadrant_priority_multiplier = int(quadrant_priority / len(target['quadrants']))
target['quadrant_priority_multiplier'] = quadrant_priority_multiplier
            target['corner_modifier'] = corner_modifier
target['priority_inverse'] //= (quadrant_priority_multiplier + matching_quadrant_priority_multiplier)
target['priority_inverse'] -= corner_modifier
sorted_targets = sorted(targets, key=itemgetter('priority_inverse', 'proximity'))
return sorted_targets[0]['cell']
def suggest_move(self, target, op='+'):
char_cell = self.char
target_cells = self.board.findall(target)
target_cell = self.choose_target(target_cells)
delta = self.board.find_delta(char_cell, target_cell)
if delta.y > 0:
return MOVE_DOWN
if delta.y < 0:
return MOVE_UP
if delta.x > 0:
return MOVE_RIGHT
if delta.x < 0:
return MOVE_LEFT
raise NoValidMove()
class InvalidMove(Exception):
pass
class NoValidMove(Exception):
pass
def next_move(posr, posc, grid):
grid_size = len(grid.split('\n') if isinstance(grid, str) else grid)
board = Board(grid_size, grid)
bot = Bot(board, 'b')
if not board.find('b'):
return 'CLEAN'
board.set_cell(Coord(posr, posc), 'b')
move = bot.suggest_move('d')
return MOVE_DES[move]
def parse_input(grid):
bot_pos = Coord(*map(int, grid.pop(0).split()))
grid_size = len(grid.split('\n') if isinstance(grid, str) else grid)
return bot_pos, grid_size, '\n'.join(grid)
def main():
bot_pos, grid_size, grid = parse_input([line.strip() for line in input()])
print(next_move(bot_pos.y, bot_pos.x, grid))
if __name__ == '__main__':
main()
|
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled
from electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum import constants
from electrum.i18n import _
from electrum.plugin import BasePlugin, Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
return trezorlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
            # Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(txin['type'])
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EventProcessingException'
db.create_table(u'payments_eventprocessingexception', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['payments.Event'], null=True)),
('data', self.gf('django.db.models.fields.TextField')()),
('message', self.gf('django.db.models.fields.CharField')(max_length=500)),
('traceback', self.gf('django.db.models.fields.TextField')()),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'payments', ['EventProcessingException'])
# Adding model 'Event'
db.create_table(u'payments_event', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stripe_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=250)),
('livemode', self.gf('django.db.models.fields.BooleanField')(default=False)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['payments.Customer'], null=True)),
('webhook_message', self.gf('jsonfield.fields.JSONField')(default={})),
('validated_message', self.gf('jsonfield.fields.JSONField')(null=True)),
('valid', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('processed', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'payments', ['Event'])
# Adding model 'Transfer'
db.create_table(u'payments_transfer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stripe_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('event', self.gf('django.db.models.fields.related.ForeignKey')(related_name='transfers', to=orm['payments.Event'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=25)),
('status', self.gf('django.db.models.fields.CharField')(max_length=25)),
('date', self.gf('django.db.models.fields.DateTimeField')()),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('adjustment_count', self.gf('django.db.models.fields.IntegerField')(null=True)),
('adjustment_fees', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('adjustment_gross', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('charge_count', self.gf('django.db.models.fields.IntegerField')(null=True)),
('charge_fees', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('charge_gross', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('collected_fee_count', self.gf('django.db.models.fields.IntegerField')(null=True)),
('collected_fee_gross', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('net', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('refund_count', self.gf('django.db.models.fields.IntegerField')(null=True)),
('refund_fees', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('refund_gross', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('validation_count', self.gf('django.db.models.fields.IntegerField')(null=True)),
('validation_fees', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
))
db.send_create_signal(u'payments', ['Transfer'])
# Adding model 'TransferChargeFee'
db.create_table(u'payments_transferchargefee', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('transfer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='charge_fee_details', to=orm['payments.Transfer'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=10)),
('application', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=150)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'payments', ['TransferChargeFee'])
# Adding model 'Customer'
db.create_table(u'payments_customer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stripe_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True)),
('card_fingerprint', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('card_last_4', self.gf('django.db.models.fields.CharField')(max_length=4, blank=True)),
('card_kind', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('date_purged', self.gf('django.db.models.fields.DateTimeField')(null=True)),
))
db.send_create_signal(u'payments', ['Customer'])
# Adding model 'CurrentSubscription'
db.create_table(u'payments_currentsubscription', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('customer', self.gf('django.db.models.fields.related.OneToOneField')(related_name='current_subscription', unique=True, null=True, to=orm['payments.Customer'])),
('plan', self.gf('django.db.models.fields.CharField')(max_length=100)),
('quantity', self.gf('django.db.models.fields.IntegerField')()),
('start', self.gf('django.db.models.fields.DateTimeField')()),
('status', self.gf('django.db.models.fields.CharField')(max_length=25)),
('cancel_at_period_end', self.gf('django.db.models.fields.BooleanField')(default=False)),
('canceled_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('current_period_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('current_period_start', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('ended_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('trial_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('trial_start', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=10)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'payments', ['CurrentSubscription'])
# Adding model 'Invoice'
db.create_table(u'payments_invoice', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stripe_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='invoices', to=orm['payments.Customer'])),
('attempted', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('attempts', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
('closed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('paid', self.gf('django.db.models.fields.BooleanField')(default=False)),
('period_end', self.gf('django.db.models.fields.DateTimeField')()),
('period_start', self.gf('django.db.models.fields.DateTimeField')()),
('subtotal', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('total', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=10)),
('date', self.gf('django.db.models.fields.DateTimeField')()),
('charge', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'payments', ['Invoice'])
# Adding model 'InvoiceItem'
db.create_table(u'payments_invoiceitem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stripe_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('invoice', self.gf('django.db.models.fields.related.ForeignKey')(related_name='items', to=orm['payments.Invoice'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=10)),
('period_start', self.gf('django.db.models.fields.DateTimeField')()),
('period_end', self.gf('django.db.models.fields.DateTimeField')()),
('proration', self.gf('django.db.models.fields.BooleanField')(default=False)),
('line_type', self.gf('django.db.models.fields.CharField')(max_length=50)),
('description', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('plan', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('quantity', self.gf('django.db.models.fields.IntegerField')(null=True)),
))
db.send_create_signal(u'payments', ['InvoiceItem'])
# Adding model 'Charge'
db.create_table(u'payments_charge', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stripe_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('customer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='charges', to=orm['payments.Customer'])),
('invoice', self.gf('django.db.models.fields.related.ForeignKey')(related_name='charges', null=True, to=orm['payments.Invoice'])),
('card_last_4', self.gf('django.db.models.fields.CharField')(max_length=4, blank=True)),
('card_kind', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=10)),
('amount', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('amount_refunded', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('paid', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('disputed', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('refunded', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('captured', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('fee', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2)),
('receipt_sent', self.gf('django.db.models.fields.BooleanField')(default=False)),
('charge_created', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'payments', ['Charge'])
def backwards(self, orm):
# Deleting model 'EventProcessingException'
db.delete_table(u'payments_eventprocessingexception')
# Deleting model 'Event'
db.delete_table(u'payments_event')
# Deleting model 'Transfer'
db.delete_table(u'payments_transfer')
# Deleting model 'TransferChargeFee'
db.delete_table(u'payments_transferchargefee')
# Deleting model 'Customer'
db.delete_table(u'payments_customer')
# Deleting model 'CurrentSubscription'
db.delete_table(u'payments_currentsubscription')
# Deleting model 'Invoice'
db.delete_table(u'payments_invoice')
# Deleting model 'InvoiceItem'
db.delete_table(u'payments_invoiceitem')
# Deleting model 'Charge'
db.delete_table(u'payments_charge')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'payments.charge': {
'Meta': {'object_name': 'Charge'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'amount_refunded': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'captured': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'card_kind': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'card_last_4': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'charge_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charges'", 'to': u"orm['payments.Customer']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'disputed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charges'", 'null': 'True', 'to': u"orm['payments.Invoice']"}),
'paid': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'receipt_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'refunded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'payments.currentsubscription': {
'Meta': {'object_name': 'CurrentSubscription'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'cancel_at_period_end': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canceled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
'current_period_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'current_period_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'current_subscription'", 'unique': 'True', 'null': 'True', 'to': u"orm['payments.Customer']"}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plan': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'quantity': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'trial_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'trial_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'payments.customer': {
'Meta': {'object_name': 'Customer'},
'card_fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'card_kind': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'card_last_4': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_purged': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True'})
},
u'payments.event': {
'Meta': {'object_name': 'Event'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Customer']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'livemode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'validated_message': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'webhook_message': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
u'payments.eventprocessingexception': {
'Meta': {'object_name': 'EventProcessingException'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'data': ('django.db.models.fields.TextField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Event']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'traceback': ('django.db.models.fields.TextField', [], {})
},
u'payments.invoice': {
'Meta': {'ordering': "['-date']", 'object_name': 'Invoice'},
'attempted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'attempts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'charge': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['payments.Customer']"}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'period_end': ('django.db.models.fields.DateTimeField', [], {}),
'period_start': ('django.db.models.fields.DateTimeField', [], {}),
'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subtotal': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
},
u'payments.invoiceitem': {
'Meta': {'object_name': 'InvoiceItem'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['payments.Invoice']"}),
'line_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'period_end': ('django.db.models.fields.DateTimeField', [], {}),
'period_start': ('django.db.models.fields.DateTimeField', [], {}),
'plan': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'proration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'payments.transfer': {
'Meta': {'object_name': 'Transfer'},
'adjustment_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'adjustment_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'adjustment_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'charge_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'charge_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'charge_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'collected_fee_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'collected_fee_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '25'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfers'", 'to': u"orm['payments.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'net': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'refund_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'refund_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'refund_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'validation_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'validation_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'})
},
u'payments.transferchargefee': {
'Meta': {'object_name': 'TransferChargeFee'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'application': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'transfer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charge_fee_details'", 'to': u"orm['payments.Transfer']"})
}
}
complete_apps = ['payments']
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HPP3_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HPP3_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HPP3_CompleteLHS, self).__init__(name='HPP3_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HPP3_CompleteLHS')
self["equations"] = []
# Set the node attributes
# match class State(0.32.m.0State) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__State"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.0State')
# match class ExitPoint(0.32.m.1ExitPoint) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__ExitPoint"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.1ExitPoint')
# match class Transition(0.32.m.2Transition) node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return True"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__Transition"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.2Transition')
# match class SIBLING0(0.32.m.3SIBLING0) node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """return True"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__SIBLING0"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.3SIBLING0')
# apply class Par(0.32.a.0Par) node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """return True"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__Par"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.0Par')
# apply class Inst(0.32.a.1Inst) node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """return True"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["mm__"] = """MT_pre__Inst"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.1Inst')
# apply class Name(0.32.a.2Name) node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """return True"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["mm__"] = """MT_pre__Name"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.2Name')
# apply class Name(0.32.a.3Name) node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """return True"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["mm__"] = """MT_pre__Name"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.3Name')
# apply class Name(0.32.a.4Name) node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """return True"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["mm__"] = """MT_pre__Name"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.4Name')
# apply class Name(0.32.a.5Name) node
self.add_node()
self.vs[9]["MT_pre__attr1"] = """return True"""
self.vs[9]["MT_label__"] = """10"""
self.vs[9]["mm__"] = """MT_pre__Name"""
self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.5Name')
# apply class Trigger(0.32.a.6Trigger) node
self.add_node()
self.vs[10]["MT_pre__attr1"] = """return True"""
self.vs[10]["MT_label__"] = """11"""
self.vs[10]["mm__"] = """MT_pre__Trigger"""
self.vs[10]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.6Trigger')
        # match association ExitPoint--outgoingTransitions-->Transition node
self.add_node()
self.vs[11]["MT_pre__attr1"] = """return attr_value == "outgoingTransitions" """
self.vs[11]["MT_label__"] = """12"""
self.vs[11]["mm__"] = """MT_pre__directLink_S"""
self.vs[11]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.1ExitPointassoc110.32.m.2Transition')
        # match association Transition--type-->SIBLING0 node
self.add_node()
self.vs[12]["MT_pre__attr1"] = """return attr_value == "type" """
self.vs[12]["MT_label__"] = """13"""
self.vs[12]["mm__"] = """MT_pre__directLink_S"""
self.vs[12]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.2Transitionassoc120.32.m.3SIBLING0')
        # match association State--exitPoints-->ExitPoint node
self.add_node()
self.vs[13]["MT_pre__attr1"] = """return attr_value == "exitPoints" """
self.vs[13]["MT_label__"] = """14"""
self.vs[13]["mm__"] = """MT_pre__directLink_S"""
self.vs[13]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.m.0Stateassoc130.32.m.1ExitPoint')
        # apply association Par--p-->Inst node
self.add_node()
self.vs[14]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[14]["MT_label__"] = """15"""
self.vs[14]["mm__"] = """MT_pre__directLink_T"""
self.vs[14]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.0Parassoc140.32.a.1Inst')
        # apply association Inst--channelNames-->Name node
self.add_node()
self.vs[15]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[15]["MT_label__"] = """16"""
self.vs[15]["mm__"] = """MT_pre__directLink_T"""
self.vs[15]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.1Instassoc150.32.a.2Name')
        # apply association Inst--channelNames-->Name node
self.add_node()
self.vs[16]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[16]["MT_label__"] = """17"""
self.vs[16]["mm__"] = """MT_pre__directLink_T"""
self.vs[16]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.1Instassoc160.32.a.3Name')
        # apply association Inst--channelNames-->Name node
self.add_node()
self.vs[17]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[17]["MT_label__"] = """18"""
self.vs[17]["mm__"] = """MT_pre__directLink_T"""
self.vs[17]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.1Instassoc170.32.a.5Name')
        # apply association Inst--channelNames-->Name node
self.add_node()
self.vs[18]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[18]["MT_label__"] = """19"""
self.vs[18]["mm__"] = """MT_pre__directLink_T"""
self.vs[18]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.1Instassoc180.32.a.4Name')
        # apply association Par--p-->Trigger node
self.add_node()
self.vs[19]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[19]["MT_label__"] = """20"""
self.vs[19]["mm__"] = """MT_pre__directLink_T"""
self.vs[19]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.0Parassoc190.32.a.6Trigger')
        # trace association Par--trace-->ExitPoint node
self.add_node()
self.vs[20]["MT_label__"] = """21"""
self.vs[20]["mm__"] = """MT_pre__trace_link"""
self.vs[20]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.0Parassoc200.32.m.1ExitPoint')
        # trace association Trigger--trace-->ExitPoint node
self.add_node()
self.vs[21]["MT_label__"] = """22"""
self.vs[21]["mm__"] = """MT_pre__trace_link"""
self.vs[21]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.6Triggerassoc210.32.m.1ExitPoint')
        # trace association Inst--trace-->Transition node
self.add_node()
self.vs[22]["MT_label__"] = """23"""
self.vs[22]["mm__"] = """MT_pre__trace_link"""
self.vs[22]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.1Instassoc220.32.m.2Transition')
        # trace association Name--trace-->Transition node
self.add_node()
self.vs[23]["MT_label__"] = """24"""
self.vs[23]["mm__"] = """MT_pre__trace_link"""
self.vs[23]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.2Nameassoc230.32.m.2Transition')
        # trace association Name--trace-->Transition node
self.add_node()
self.vs[24]["MT_label__"] = """25"""
self.vs[24]["mm__"] = """MT_pre__trace_link"""
self.vs[24]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.3Nameassoc240.32.m.2Transition')
        # trace association Name--trace-->Transition node
self.add_node()
self.vs[25]["MT_label__"] = """26"""
self.vs[25]["mm__"] = """MT_pre__trace_link"""
self.vs[25]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.4Nameassoc250.32.m.2Transition')
        # trace association Name--trace-->Transition node
self.add_node()
self.vs[26]["MT_label__"] = """27"""
self.vs[26]["mm__"] = """MT_pre__trace_link"""
self.vs[26]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.32.a.5Nameassoc260.32.m.2Transition')
self['equations'].append(((10,'channel'),('constant','sh_in')))
# Add the edges
self.add_edges([
(1,11), # match class ExitPoint(0.32.m.1ExitPoint) -> association outgoingTransitions
(11,2), # association Transition -> match class Transition(0.32.m.2Transition)
(2,12), # match class Transition(0.32.m.2Transition) -> association type
(12,3), # association SIBLING0 -> match class SIBLING0(0.32.m.3SIBLING0)
(0,13), # match class State(0.32.m.0State) -> association exitPoints
(13,1), # association ExitPoint -> match class ExitPoint(0.32.m.1ExitPoint)
(4,14), # apply class Par(0.32.a.0Par) -> association p
(14,5), # association Inst -> apply class Inst(0.32.a.1Inst)
(5,15), # apply class Inst(0.32.a.1Inst) -> association channelNames
(15,6), # association Name -> apply class Name(0.32.a.2Name)
(5,16), # apply class Inst(0.32.a.1Inst) -> association channelNames
(16,7), # association Name -> apply class Name(0.32.a.3Name)
(5,17), # apply class Inst(0.32.a.1Inst) -> association channelNames
(17,9), # association Name -> apply class Name(0.32.a.5Name)
(5,18), # apply class Inst(0.32.a.1Inst) -> association channelNames
(18,8), # association Name -> apply class Name(0.32.a.4Name)
(4,19), # apply class Par(0.32.a.0Par) -> association p
(19,10), # association Trigger -> apply class Trigger(0.32.a.6Trigger)
(4,20), # apply class Par(0.32.m.1ExitPoint) -> backward_association
(20,1), # backward_associationExitPoint -> match_class ExitPoint(0.32.m.1ExitPoint)
(10,21), # apply class Trigger(0.32.m.1ExitPoint) -> backward_association
(21,1), # backward_associationExitPoint -> match_class ExitPoint(0.32.m.1ExitPoint)
(5,22), # apply class Inst(0.32.m.2Transition) -> backward_association
(22,2), # backward_associationTransition -> match_class Transition(0.32.m.2Transition)
(6,23), # apply class Name(0.32.m.2Transition) -> backward_association
(23,2), # backward_associationTransition -> match_class Transition(0.32.m.2Transition)
(7,24), # apply class Name(0.32.m.2Transition) -> backward_association
(24,2), # backward_associationTransition -> match_class Transition(0.32.m.2Transition)
(8,25), # apply class Name(0.32.m.2Transition) -> backward_association
(25,2), # backward_associationTransition -> match_class Transition(0.32.m.2Transition)
(9,26), # apply class Name(0.32.m.2Transition) -> backward_association
(26,2), # backward_associationTransition -> match_class Transition(0.32.m.2Transition)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
def eval_attr13(self, attr_value, this):
return True
def eval_attr14(self, attr_value, this):
return True
# define evaluation methods for each apply class.
def eval_attr15(self, attr_value, this):
return True
def eval_attr16(self, attr_value, this):
return True
def eval_attr17(self, attr_value, this):
return True
def eval_attr18(self, attr_value, this):
return True
def eval_attr19(self, attr_value, this):
return True
def eval_attr110(self, attr_value, this):
return True
def eval_attr111(self, attr_value, this):
return True
# define evaluation methods for each match association.
def eval_attr112(self, attr_value, this):
return attr_value == "outgoingTransitions"
def eval_attr113(self, attr_value, this):
return attr_value == "type"
def eval_attr114(self, attr_value, this):
return attr_value == "exitPoints"
# define evaluation methods for each apply association.
def eval_attr115(self, attr_value, this):
return attr_value == "p"
def eval_attr116(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr117(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr118(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr119(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr120(self, attr_value, this):
return attr_value == "p"
def constraint(self, PreNode, graph):
return True
|
|
from datetime import datetime
from typing import Dict, Optional
from kombu.exceptions import OperationalError
from abonapp.tasks import customer_nas_command, customer_nas_remove
from agent.commands.dhcp import dhcp_commit, dhcp_expiry, dhcp_release
from devapp.models import Device, Port as DevPort
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.exceptions import PermissionDenied, ValidationError
from django.db import IntegrityError, transaction
from django.db.models import Count
from django.http import (
    Http404, HttpResponse, HttpResponseRedirect
)
from django.shortcuts import render, redirect, get_object_or_404, resolve_url
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView, UpdateView, CreateView, DeleteView, DetailView
from djing import lib
from djing.global_base_views import OrderedFilteredList, SecureApiView
from djing.lib.decorators import json_view, only_admins
from djing.lib.mixins import (
OnlyAdminsMixin,
LoginAdminPermissionMixin,
LoginAdminMixin
)
from group_app.models import Group
from guardian.decorators import \
permission_required_or_403 as permission_required
from guardian.shortcuts import get_objects_for_user, assign_perm
from gw_app.models import NASModel
from gw_app.nas_managers import NasFailedResult, NasNetworkError
from ip_pool.models import NetworkModel
from tariff_app.models import Tariff
from taskapp.models import Task
from abonapp import forms
from abonapp import models
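# Subscribers (Abon) of a single group; supports filtering by the ?street=
# GET parameter on top of the ordering provided by OrderedFilteredList.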
class PeoplesListView(LoginRequiredMixin, OnlyAdminsMixin,
OrderedFilteredList):
template_name = 'abonapp/peoples.html'
def get_queryset(self):
street_id = lib.safe_int(self.request.GET.get('street'))
gid = lib.safe_int(self.kwargs.get('gid'))
peoples_list = models.Abon.objects.filter(group__pk=gid)
if street_id > 0:
peoples_list = peoples_list.filter(street=street_id)
peoples_list = peoples_list.select_related(
'group', 'street', 'current_tariff__tariff', 'statcache'
).only(
'group', 'street', 'fio', 'birth_day',
'street', 'house', 'telephone', 'ballance', 'markers',
'username', 'is_active', 'current_tariff', 'ip_address'
)
ordering = self.get_ordering()
if ordering and isinstance(ordering, str):
ordering = (ordering,)
peoples_list = peoples_list.order_by(*ordering)
return peoples_list
def get_context_data(self, **kwargs):
gid = lib.safe_int(self.kwargs.get('gid'))
if gid < 1:
            raise Http404('group id is broken')
group = get_object_or_404(Group, pk=gid)
if not self.request.user.has_perm('group_app.view_group', group):
raise PermissionDenied
context = super(PeoplesListView, self).get_context_data(**kwargs)
context['streets'] = models.AbonStreet.objects.filter(
group=gid
).only('name')
context['street_id'] = lib.safe_int(self.request.GET.get('street'))
context['group'] = group
return context
class GroupListView(LoginRequiredMixin, OnlyAdminsMixin, OrderedFilteredList):
context_object_name = 'groups'
template_name = 'abonapp/group_list.html'
def get_queryset(self):
queryset = get_objects_for_user(
self.request.user,
'group_app.view_group', klass=Group,
use_groups=False,
accept_global_perms=False
)
return queryset.annotate(usercount=Count('abon'))
class AbonCreateView(LoginRequiredMixin, OnlyAdminsMixin,
PermissionRequiredMixin, CreateView):
permission_required = 'abonapp.add_abon'
group = None
abon = None
form_class = forms.AbonForm
model = models.Abon
template_name = 'abonapp/addAbon.html'
context_object_name = 'group'
def dispatch(self, request, *args, **kwargs):
group = get_object_or_404(Group, pk=self.kwargs.get('gid'))
if not request.user.has_perm('group_app.view_group', group):
raise PermissionDenied
self.group = group
return super(AbonCreateView, self).dispatch(request, *args, **kwargs)
def get_initial(self):
return {
'group': self.group,
'address': _('Address'),
'is_active': False
}
def get_context_data(self, **kwargs):
context = super(AbonCreateView, self).get_context_data(**kwargs)
context['group'] = self.group
return context
def form_valid(self, form):
try:
abon = form.save()
me = self.request.user
assign_perm("abonapp.change_abon", me, abon)
assign_perm("abonapp.delete_abon", me, abon)
assign_perm("abonapp.can_buy_tariff", me, abon)
assign_perm('abonapp.can_add_ballance', me, abon)
me.log(self.request.META, 'cusr', '%s, "%s", %s' % (
abon.username, abon.fio,
abon.group.title if abon.group else ''
))
messages.success(self.request, _('create abon success msg'))
self.abon = abon
return super(AbonCreateView, self).form_valid(form)
except (
IntegrityError,
NasFailedResult,
NasNetworkError,
lib.LogicError
) as e:
messages.error(self.request, e)
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(self.request, err)
return self.render_to_response(self.get_context_data(form=form))
def form_invalid(self, form):
messages.error(self.request, _('fix form errors'))
return super(AbonCreateView, self).form_invalid(form)
class DelAbonDeleteView(LoginAdminMixin, PermissionRequiredMixin, DeleteView):
permission_required = 'abonapp.delete_abon'
model = models.Abon
slug_url_kwarg = 'uname'
slug_field = 'username'
success_url = reverse_lazy('abonapp:group_list')
context_object_name = 'abon'
def get_object(self, queryset=None):
abon = super(DelAbonDeleteView, self).get_object(queryset)
if not self.request.user.has_perm('group_app.view_group', abon.group):
raise PermissionDenied
return abon
def delete(self, request, *args, **kwargs):
try:
abon = self.get_object()
gid = abon.group.id
if abon.current_tariff:
abon_tariff = abon.current_tariff.tariff
customer_nas_remove.delay(
customer_uid=abon.pk, ip_addr=abon.ip_address,
speed=(abon_tariff.speedIn, abon_tariff.speedOut),
is_access=abon.is_access(), nas_pk=abon.nas_id
)
abon.delete()
request.user.log(request.META, 'dusr', (
'%(uname)s, "%(fio)s", %(group)s %(street)s %(house)s' % {
'uname': abon.username,
'fio': abon.fio or '-',
'group': abon.group.title if abon.group else '',
'street': abon.street.name if abon.street else '',
'house': abon.house or ''
}).strip())
messages.success(request, _('delete abon success msg'))
return redirect('abonapp:people_list', gid=gid)
except (NasNetworkError, OperationalError) as e:
messages.error(self.request, e)
except NasFailedResult as e:
messages.error(self.request, _("NAS says: '%s'") % e)
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(self.request, err)
return HttpResponseRedirect(self.success_url)
@login_required
@only_admins
@permission_required('abonapp.can_add_ballance')
@transaction.atomic
def abonamount(request, gid: int, uname):
abon = get_object_or_404(models.Abon, username=uname)
frm = None
try:
if request.method == 'POST':
frm = forms.AmountMoneyForm(request.POST)
if frm.is_valid():
amnt = frm.cleaned_data.get('amount')
comment = frm.cleaned_data.get('comment')
if not comment:
comment = _('fill account through admin side')
abon.add_ballance(request.user, amnt, comment=comment)
abon.save(update_fields=('ballance',))
messages.success(
request, _('Account filled successfully on %.2f') % amnt
)
return redirect('abonapp:abon_home', gid=gid, uname=uname)
else:
                messages.error(request, _('I do not know the account id'))
else:
frm = forms.AmountMoneyForm()
except (NasNetworkError, NasFailedResult) as e:
messages.error(request, e)
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(request, err)
return render(request, 'abonapp/modal_abonamount.html', {
'abon': abon,
'group_id': gid,
'form': frm
})
class DebtsListView(LoginAdminPermissionMixin, OrderedFilteredList):
permission_required = 'group_app.view_group'
context_object_name = 'invoices'
template_name = 'abonapp/invoiceForPayment.html'
def get_permission_object(self):
if not hasattr(self, 'abon'):
abon = get_object_or_404(models.Abon, username=self.kwargs.get('uname'))
self.abon = abon
return self.abon.group
def get_queryset(self):
        if not hasattr(self, 'abon'):
            self.abon = get_object_or_404(
                models.Abon, username=self.kwargs.get('uname')
            )
return models.InvoiceForPayment.objects.filter(abon=self.abon)
def get_context_data(self, **kwargs):
context = super(DebtsListView, self).get_context_data(**kwargs)
context['group'] = self.abon.group
context['abon'] = self.abon
return context
class AbonServices(LoginAdminMixin, DetailView):
model = models.Abon
slug_url_kwarg = 'uname'
slug_field = 'username'
template_name = 'abonapp/service.html'
context_object_name = 'abon'
def get_object(self, queryset=None):
gid = self.kwargs.get('gid')
abon = super().get_object(queryset)
if abon.group.pk != gid:
messages.warning(
self.request,
_("User group id is not matches with group in url")
)
return redirect('abonapp:abon_services', abon.group.pk, abon.username)
if not self.request.user.has_perm('group_app.view_group', abon.group):
raise PermissionDenied
return abon
def get_context_data(self, **kwargs):
abon = self.object
periodic_pay = models.PeriodicPayForId.objects.filter(
account=abon
).first()
context = {
'abon_tariff': abon.current_tariff,
'group': abon.group,
'services': Tariff.objects.get_tariffs_by_group(abon.group.pk),
'periodic_pay': periodic_pay
}
context.update(kwargs)
return super().get_context_data(**context)
class AbonHomeUpdateView(LoginAdminMixin, PermissionRequiredMixin, UpdateView):
permission_required = 'abonapp.view_abon'
model = models.Abon
form_class = forms.AbonForm
slug_field = 'username'
slug_url_kwarg = 'uname'
template_name = 'abonapp/editAbon.html'
context_object_name = 'abon'
group = None
def dispatch(self, request, *args, **kwargs):
try:
return super(AbonHomeUpdateView, self).dispatch(
request, *args,
**kwargs
)
except lib.LogicError as e:
messages.error(request, e)
except (NasFailedResult, NasNetworkError) as e:
messages.error(request, e)
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(request, err)
return self.render_to_response(self.get_context_data())
def get_object(self, queryset=None):
gid = self.kwargs.get('gid')
self.group = get_object_or_404(Group, pk=gid)
if not self.request.user.has_perm('group_app.view_group', self.group):
raise PermissionDenied
return super(AbonHomeUpdateView, self).get_object(queryset)
def form_valid(self, form):
r = super(AbonHomeUpdateView, self).form_valid(form)
abon = self.object
try:
customer_nas_command.delay(abon.pk, 'sync')
except OperationalError as e:
messages.error(self.request, str(e))
else:
messages.success(self.request, _('edit abon success msg'))
return r
def form_invalid(self, form):
messages.warning(self.request, _('fix form errors'))
return super(AbonHomeUpdateView, self).form_invalid(form)
def get(self, request, *args, **kwargs):
r = super(AbonHomeUpdateView, self).get(request, *args, **kwargs)
abon = self.object
if abon.device is None:
messages.warning(request, _('User device was not found'))
return r
def get_initial(self):
abon = self.object
if self.initial:
return self.initial
try:
passw = models.AbonRawPassword.objects.get(
account=abon
).passw_text
return {
'password': passw
}
except models.AbonRawPassword.DoesNotExist:
messages.warning(
self.request,
                _('User does not have a password and cannot log in')
)
return {'password': ''}
def get_context_data(self, **kwargs):
abon = self.object
device = getattr(abon, 'device')
context = {
'group': self.group,
'device': device,
'dev_ports': DevPort.objects.filter(
device=device) if device else None
}
context.update(kwargs)
return super(AbonHomeUpdateView, self).get_context_data(**context)
@login_required
@only_admins
@permission_required('abonapp.add_invoiceforpayment')
def add_invoice(request, gid: int, uname: str):
abon = get_object_or_404(models.Abon, username=uname)
grp = get_object_or_404(Group, pk=gid)
try:
if request.method == 'POST':
curr_amount = lib.safe_int(request.POST.get('curr_amount'))
comment = request.POST.get('comment')
newinv = models.InvoiceForPayment()
newinv.abon = abon
newinv.amount = curr_amount
newinv.comment = comment
if request.POST.get('status') == 'on':
newinv.status = True
newinv.author = request.user
newinv.save()
messages.success(request, _('Receipt has been created'))
return redirect('abonapp:abon_debts', gid=gid, uname=uname)
except (NasNetworkError, NasFailedResult) as e:
messages.error(request, e)
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(request, err)
return render(request, 'abonapp/addInvoice.html', {
'abon': abon,
'invcount': models.InvoiceForPayment.objects.filter(abon=abon).count(),
'group': grp
})
@login_required
@only_admins
@permission_required('abonapp.can_buy_tariff')
def pick_tariff(request, gid: int, uname):
grp = get_object_or_404(Group, pk=gid)
abon = get_object_or_404(models.Abon, username=uname)
tariffs = Tariff.objects.get_tariffs_by_group(grp.pk)
try:
if request.method == 'POST':
trf = Tariff.objects.get(pk=request.POST.get('tariff'))
deadline = request.POST.get('deadline')
log_comment = _(
"Service '%(service_name)s' "
"has connected via admin until %(deadline)s") % {
'service_name': trf.title,
'deadline': deadline
}
if deadline:
deadline = datetime.strptime(deadline, '%Y-%m-%dT%H:%M')
abon.pick_tariff(trf, request.user, deadline=deadline, comment=log_comment)
customer_nas_command.delay(abon.pk, 'sync')
messages.success(request, _('Tariff has been picked'))
return redirect('abonapp:abon_services', gid=gid,
uname=abon.username)
except (lib.LogicError, NasFailedResult, NasNetworkError, OperationalError) as e:
messages.error(request, e)
return redirect('abonapp:abon_services', gid=gid, uname=abon.username)
except Tariff.DoesNotExist:
        messages.error(request, _('The tariff you picked does not exist'))
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(request, err)
except ValueError as e:
messages.error(request, "%s: %s" % (_('fix form errors'), e))
selected_tariff = request.GET.get('selected_tariff')
if selected_tariff:
selected_tariff = get_object_or_404(Tariff, pk=selected_tariff)
return render(request, 'abonapp/buy_tariff.html', {
'tariffs': tariffs,
'abon': abon,
'group': grp,
'selected_tariff': selected_tariff
})
@login_required
@only_admins
@permission_required('abonapp.can_complete_service')
def unsubscribe_service(request, gid: int, uname, abon_tariff_id: int):
try:
abon_tariff = get_object_or_404(models.AbonTariff,
pk=int(abon_tariff_id))
abon = abon_tariff.abon
trf = abon_tariff.tariff
customer_nas_remove.delay(
customer_uid=abon.pk, ip_addr=abon.ip_address,
speed=(trf.speedIn, trf.speedOut),
is_access=abon.is_access(), nas_pk=abon.nas_id
)
abon_tariff.delete()
messages.success(request, _('User has been detached from service'))
except NasFailedResult as e:
messages.error(request, e)
except (NasNetworkError, OperationalError) as e:
messages.warning(request, e)
except lib.MultipleException as errs:
for err in errs.err_list:
messages.error(request, err)
return redirect('abonapp:abon_services', gid=gid, uname=uname)
class LogListView(LoginAdminPermissionMixin, ListView):
permission_required = 'abonapp.view_abonlog'
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
http_method_names = ('get',)
context_object_name = 'logs'
template_name = 'abonapp/log.html'
model = models.AbonLog
class DebtorsListView(LoginAdminPermissionMixin, ListView):
permission_required = 'abonapp.view_invoiceforpayment'
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
http_method_names = ('get',)
context_object_name = 'invoices'
template_name = 'abonapp/debtors.html'
queryset = models.InvoiceForPayment.objects.filter(status=True)
class TaskLogListView(LoginAdminPermissionMixin, ListView):
permission_required = 'group_app.view_group'
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
http_method_names = ('get',)
context_object_name = 'tasks'
template_name = 'abonapp/task_log.html'
def get_permission_object(self):
if hasattr(self, 'abon'):
return self.abon.group
else:
return get_object_or_404(models.Group, pk=self.kwargs.get('gid'))
def get_queryset(self):
abon = get_object_or_404(models.Abon,
username=self.kwargs.get('uname'))
self.abon = abon
return Task.objects.filter(abon=abon)
def get_context_data(self, **kwargs):
context = super(TaskLogListView, self).get_context_data(**kwargs)
context['group'] = self.abon.group
context['abon'] = self.abon
return context
class PassportUpdateView(LoginAdminPermissionMixin, UpdateView):
permission_required = 'abonapp.view_passportinfo'
form_class = forms.PassportForm
model = models.PassportInfo
template_name = 'abonapp/modal_passport_view.html'
def get_object(self, queryset=None):
self.abon = get_object_or_404(models.Abon,
username=self.kwargs.get('uname'))
try:
passport_instance = models.PassportInfo.objects.get(
abon=self.abon
)
except models.PassportInfo.DoesNotExist:
passport_instance = None
return passport_instance
def form_valid(self, form):
pi = form.save(commit=False)
pi.abon = self.abon
pi.save()
messages.success(
self.request,
_('Passport information has been saved')
)
return super(PassportUpdateView, self).form_valid(form)
def get_success_url(self):
return resolve_url(
'abonapp:abon_home',
gid=self.kwargs.get('gid'),
uname=self.kwargs.get('uname')
)
def form_invalid(self, form):
messages.error(self.request, _('fix form errors'))
return super(PassportUpdateView, self).form_invalid(form)
def get_context_data(self, **kwargs):
context = {
'group': get_object_or_404(Group, pk=self.kwargs.get('gid')),
'abon': self.abon
}
context.update(kwargs)
return super(PassportUpdateView, self).get_context_data(**context)
class IpUpdateView(LoginAdminPermissionMixin, UpdateView):
permission_required = 'abonapp.change_abon'
form_class = forms.AddIpForm
model = models.Abon
slug_url_kwarg = 'uname'
slug_field = 'username'
template_name = 'abonapp/modal_ip_form.html'
def dispatch(self, request, *args, **kwargs):
try:
return super(IpUpdateView, self).dispatch(request, *args, **kwargs)
except lib.LogicError as e:
messages.error(request, e)
except IntegrityError as e:
str_text = str(e)
if 'abonent_ip_address_nas_id' in str_text and 'duplicate key value' in str_text:
messages.error(request, _('IP address conflict'))
else:
messages.error(request, e)
return self.render_to_response(self.get_context_data(**kwargs))
def form_valid(self, form):
r = super(IpUpdateView, self).form_valid(form)
abon = self.object
try:
customer_nas_command.delay(abon.pk, 'sync')
except OperationalError as e:
messages.error(self.request, str(e))
else:
messages.success(self.request, _('Ip successfully updated'))
return r
def get_context_data(self, **kwargs):
context = super(IpUpdateView, self).get_context_data(**kwargs)
context['group'] = self.object.group
context['abon'] = self.object
return context
@login_required
@only_admins
def chgroup_tariff(request, gid):
grp = get_object_or_404(Group, pk=gid)
if not request.user.has_perm('group_app.change_group', grp):
raise PermissionDenied
if request.method == 'POST':
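        # Diff the group's current tariff set (ma) against the ids submitted
        # in the form (mb): detach the unchecked tariffs and attach the newly
        # checked ones.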
ma = frozenset(t.id for t in grp.tariff_set.all())
mb = frozenset(map(int, request.POST.getlist('tr')))
sub = ma - mb
add = mb - ma
grp.tariff_set.remove(*sub)
grp.tariff_set.add(*add)
models.Abon.objects.filter(
group=grp,
last_connected_tariff__in=sub
).update(last_connected_tariff=None)
messages.success(request, _('Successfully saved'))
return redirect('abonapp:ch_group_tariff', gid)
tariffs = Tariff.objects.all()
seleted_tariffs_id = tuple(
pk[0] for pk in grp.tariff_set.only('pk').values_list('pk')
)
return render(request, 'abonapp/group_tariffs.html', {
'group': grp,
'seleted_tariffs': seleted_tariffs_id,
'tariffs': tariffs
})
@login_required
@only_admins
@permission_required('abonapp.change_abon')
def dev(request, gid: int, uname):
abon_dev = None
try:
abon = models.Abon.objects.get(username=uname)
if request.method == 'POST':
abon.device = Device.objects.get(pk=request.POST.get('dev'))
abon.save(update_fields=('device',))
            messages.success(request, _('Device has been successfully attached'))
return redirect('abonapp:abon_home', gid=gid, uname=uname)
else:
abon_dev = abon.device
except Device.DoesNotExist:
messages.warning(
request,
            _('The device you selected no longer exists')
)
except models.Abon.DoesNotExist:
messages.error(request, _('Abon does not exist'))
return redirect('abonapp:people_list', gid=gid)
return render(request, 'abonapp/modal_dev.html', {
'devices': Device.objects.filter(group=gid),
'dev': abon_dev,
'gid': gid, 'uname': uname
})
@login_required
@only_admins
@permission_required('abonapp.change_abon')
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def clear_dev(request, gid: int, uname):
try:
abon = models.Abon.objects.get(username=uname)
abon.device = None
abon.dev_port = None
abon.is_dynamic_ip = False
abon.save(update_fields=('device', 'dev_port', 'is_dynamic_ip'))
        messages.success(request, _('Device has been successfully detached'))
except models.Abon.DoesNotExist:
messages.error(request, _('Abon does not exist'))
return redirect('abonapp:people_list', gid=gid)
return redirect('abonapp:abon_home', gid=gid, uname=uname)
@login_required
@only_admins
@permission_required('abonapp.can_ping')
@json_view
def abon_ping(request, gid: int, uname):
ip = request.GET.get('cmd_param')
status = 1
text = '<span class="glyphicon glyphicon-exclamation-sign"></span> %s' % _('no ping')
abon = get_object_or_404(models.Abon, username=uname)
try:
if ip is None:
raise lib.LogicError(_('Ip not passed'))
if abon.nas is None:
return {
'status': 1,
'dat': '<span class="glyphicon glyphicon-exclamation-sign">'
'</span> %s' % _('gateway required')
}
mngr = abon.nas.get_nas_manager()
ping_result = mngr.ping(ip)
if ping_result is None:
return {
'status': 1,
'dat': text
}
if isinstance(ping_result, tuple):
received, sent = ping_result
if received == 0:
ping_result = mngr.ping(ip, arp=True)
if ping_result is not None and isinstance(ping_result, tuple):
received, sent = ping_result
else:
return {
'status': 1,
'dat': text
}
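        # Interpret the echo statistics: more replies than requests means
        # another host answered as well (IP conflict), more than half answered
        # counts as a good ping, anything less is reported as no ping.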
loses_percent = (
received / sent if sent != 0 else 1
)
ping_result = {'return': received, 'all': sent}
if loses_percent > 1.0:
text = '<span class="glyphicon glyphicon-exclamation-sign"></span> %s' % _(
'IP Conflict! %(return)d/%(all)d results'
) % ping_result
elif loses_percent > 0.5:
text = '<span class="glyphicon glyphicon-ok"></span> %s' % _(
'ok ping, %(return)d/%(all)d loses'
) % ping_result
status = 0
else:
text = '<span class="glyphicon glyphicon-exclamation-sign"></span> %s' % _(
'no ping, %(return)d/%(all)d loses'
) % ping_result
except (NasFailedResult, lib.LogicError) as e:
text = str(e)
except NasNetworkError as e:
text = str(e)
return {
'status': status,
'dat': text
}
@login_required
@only_admins
@json_view
def set_auto_continue_service(request, gid: int, uname):
checked = request.GET.get('checked')
checked = checked == 'true'
abon = get_object_or_404(models.Abon, username=uname)
abon.autoconnect_service = checked
abon.save(update_fields=('autoconnect_service',))
return {
'status': 0
}
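# Export every subscriber (plus their additional telephone numbers) as one
# downloadable vCard 4.0 file.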
@login_required
@only_admins
def vcards(r):
users = models.Abon.objects.exclude(group=None).select_related(
'group',
'street'
).only(
'username', 'fio', 'group__title', 'telephone',
'street__name', 'house'
)
additional_tels = models.AdditionalTelephone.objects.select_related(
'abon',
'abon__group',
'abon__street'
)
response = HttpResponse(content_type='text/x-vcard')
response['Content-Disposition'] = 'attachment; filename="contacts.vcard"'
tmpl = ("BEGIN:VCARD\r\n"
"VERSION:4.0\r\n"
"FN:%(uname)s. %(group_name)s, %(street)s %(house)s\r\n"
"IMPP:sip:%(abon_telephone)[email protected]\r\n"
"END:VCARD\r\n")
def _make_vcard():
for ab in users.iterator():
tel = ab.telephone
if tel:
yield tmpl % {
'uname': ab.get_full_name(),
'group_name': ab.group.title,
'street': ab.street.name if ab.street else '',
'house': ab.house,
'abon_telephone': tel
}
if not additional_tels.exists():
return
for add_tel in additional_tels.iterator():
abon = add_tel.abon
yield tmpl % {
'uname': "%s (%s)" % (
add_tel.owner_name, abon.get_full_name()),
'group_name': abon.group.title,
'abon_telephone': add_tel.telephone,
'street': abon.street.name if abon.street else '',
'house': abon.house
}
response.content = _make_vcard()
return response
@login_required
@only_admins
@permission_required('abonapp.change_abon')
def save_user_dev_port(request, gid: int, uname):
if request.method != 'POST':
messages.error(request, _('Method is not POST'))
return redirect('abonapp:abon_home', gid, uname)
user_port = lib.safe_int(request.POST.get('user_port'))
is_dynamic_ip = request.POST.get('is_dynamic_ip')
is_dynamic_ip = is_dynamic_ip == 'on'
try:
abon = models.Abon.objects.get(username=uname)
if user_port == 0:
port = None
else:
port = DevPort.objects.get(pk=user_port)
if abon.device is not None:
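            # Refuse to bind the port if another subscriber is already
            # attached to the same port of the same device.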
try:
other_abon = models.Abon.objects.get(
device=abon.device,
dev_port=port
)
if other_abon != abon:
user_url = resolve_url(
'abonapp:abon_home',
other_abon.group.id,
other_abon.username
)
messages.error(
request, _("<a href='%(user_url)s'>%(user_name)s</a> already pinned to this port on this device") % {
'user_url': user_url,
'user_name': other_abon.get_full_name()
}
)
return redirect('abonapp:abon_home', gid, uname)
except models.Abon.DoesNotExist:
pass
except models.Abon.MultipleObjectsReturned:
messages.error(request,
_('Multiple users on the same device port'))
return redirect('devapp:view', abon.device.group.pk,
abon.device.pk)
abon.dev_port = port
if abon.is_dynamic_ip != is_dynamic_ip:
abon.is_dynamic_ip = is_dynamic_ip
abon.save(update_fields=('dev_port', 'is_dynamic_ip'))
else:
abon.save(update_fields=('dev_port',))
messages.success(request, _('User port has been saved'))
except DevPort.DoesNotExist:
messages.error(request, _('Selected port does not exist'))
except models.Abon.DoesNotExist:
messages.error(request, _('User does not exist'))
return redirect('abonapp:abon_home', gid, uname)
@login_required
@only_admins
@permission_required('abonapp.add_abonstreet')
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def street_add(request, gid):
if request.method == 'POST':
frm = forms.AbonStreetForm(request.POST)
if frm.is_valid():
frm.save()
messages.success(request, _('Street successfully saved'))
return redirect('abonapp:people_list', gid)
else:
messages.error(request, _('fix form errors'))
else:
frm = forms.AbonStreetForm(initial={'group': gid})
return render(request, 'abonapp/modal_addstreet.html', {
'form': frm,
'gid': gid
})
@login_required
@only_admins
@permission_required('abonapp.change_abonstreet')
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def street_edit(request, gid):
try:
if request.method == 'POST':
for sid, sname in zip(request.POST.getlist('sid'),
request.POST.getlist('sname')):
street = models.AbonStreet.objects.get(pk=sid)
street.name = sname
street.save()
            messages.success(request, _('Streets have been saved'))
else:
return render(request, 'abonapp/modal_editstreet.html', {
'gid': gid,
'streets': models.AbonStreet.objects.filter(group=gid)
})
except models.AbonStreet.DoesNotExist:
messages.error(request, _('One of these streets has not been found'))
return redirect('abonapp:people_list', gid)
@login_required
@only_admins
@permission_required('abonapp.delete_abonstreet')
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def street_del(request, gid: int, sid: int):
try:
models.AbonStreet.objects.get(pk=sid, group=gid).delete()
messages.success(request, _('The street successfully deleted'))
except models.AbonStreet.DoesNotExist:
messages.error(request, _('The street has not been found'))
return redirect('abonapp:people_list', gid)
@login_required
@only_admins
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def active_nets(request, gid):
nets = NetworkModel.objects.filter(groups__id=gid)
return render(request, 'abonapp/modal_current_networks.html', {
'networks': nets
})
@login_required
@only_admins
@permission_required('abonapp.view_additionaltelephones')
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def tels(request, gid: int, uname):
abon = get_object_or_404(models.Abon, username=uname)
telephones = abon.additional_telephones.all()
return render(request, 'abonapp/modal_additional_telephones.html', {
'telephones': telephones,
'gid': gid,
'uname': uname
})
@login_required
@only_admins
@permission_required('abonapp.add_additionaltelephone')
def tel_add(request, gid: int, uname):
if request.method == 'POST':
frm = forms.AdditionalTelephoneForm(request.POST)
if frm.is_valid():
new_tel = frm.save(commit=False)
abon = get_object_or_404(models.Abon, username=uname)
new_tel.abon = abon
new_tel.save()
messages.success(request, _('New telephone has been saved'))
return redirect('abonapp:abon_home', gid, uname)
else:
messages.error(request, _('fix form errors'))
else:
frm = forms.AdditionalTelephoneForm()
return render(request, 'abonapp/modal_add_phone.html', {
'form': frm,
'gid': gid,
'uname': uname
})
@login_required
@only_admins
@permission_required('abonapp.delete_additionaltelephone')
def tel_del(request, gid: int, uname):
try:
tid = lib.safe_int(request.GET.get('tid'))
tel = models.AdditionalTelephone.objects.get(pk=tid)
tel.delete()
messages.success(request,
_('Additional telephone successfully deleted'))
except models.AdditionalTelephone.DoesNotExist:
messages.error(request, _('Telephone not found'))
return redirect('abonapp:abon_home', gid, uname)
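# Telephone numbers of the group's subscribers plus their additional contacts,
# shown in a modal or exported as CSV when ?f=csv is passed.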
@login_required
@only_admins
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def phonebook(request, gid):
res_format = request.GET.get('f')
t1 = models.Abon.objects.filter(
group__id=int(gid)
).only('telephone', 'fio').values_list(
'telephone', 'fio'
)
t2 = models.AdditionalTelephone.objects.filter(
abon__group__id=gid
).only(
'telephone', 'owner_name'
).values_list(
'telephone', 'owner_name'
)
telephones = tuple(t1) + tuple(t2)
if res_format == 'csv':
import csv
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="phones.csv"'
writer = csv.writer(response, quoting=csv.QUOTE_NONNUMERIC)
for row in telephones:
writer.writerow(row)
return response
return render(request, 'abonapp/modal_phonebook.html', {
'tels': telephones,
'gid': gid
})
@login_required
@only_admins
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def abon_export(request, gid):
res_format = request.GET.get('f')
if request.method == 'POST':
frm = forms.ExportUsersForm(request.POST)
if frm.is_valid():
cleaned_data = frm.clean()
fields = cleaned_data.get('fields')
subscribers = models.Abon.objects.filter(group__id=gid).only(
*fields).values_list(*fields)
if res_format == 'csv':
import csv
response = HttpResponse(content_type='text/csv')
response[
'Content-Disposition'
] = 'attachment; filename="users.csv"'
writer = csv.writer(response, quoting=csv.QUOTE_NONNUMERIC)
display_values = (
f[1] for f in frm.fields['fields'].choices if
f[0] in fields
)
writer.writerow(display_values)
for row in subscribers:
writer.writerow(row)
return response
else:
messages.info(
request,
_('Unexpected format %(export_format)s') % {
'export_format': res_format
}
)
return redirect('abonapp:group_list')
else:
messages.error(request, _('fix form errors'))
return redirect('abonapp:group_list')
else:
frm = forms.ExportUsersForm()
return render(request, 'abonapp/modal_export.html', {
'gid': gid,
'form': frm
})
@login_required
@only_admins
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
def add_edit_periodic_pay(request, gid: int, uname, periodic_pay_id=0):
if periodic_pay_id == 0:
if not request.user.has_perm('abonapp.add_periodicpayforid'):
raise PermissionDenied
periodic_pay_instance = models.PeriodicPayForId()
else:
if not request.user.has_perm('abonapp.change_periodicpayforid'):
raise PermissionDenied
periodic_pay_instance = get_object_or_404(
models.PeriodicPayForId,
pk=periodic_pay_id
)
if request.method == 'POST':
frm = forms.PeriodicPayForIdForm(
request.POST,
instance=periodic_pay_instance
)
if frm.is_valid():
abon = get_object_or_404(models.Abon, username=uname)
inst = frm.save(commit=False)
inst.account = abon
inst.save()
            messages.success(request, _('Periodic pay has been designated'))
else:
            messages.error(request, _('Something is wrong with the form'))
return redirect('abonapp:abon_services', gid, uname)
else:
frm = forms.PeriodicPayForIdForm(instance=periodic_pay_instance)
return render(request, 'abonapp/modal_periodic_pay.html', {
'form': frm,
'gid': gid,
'uname': uname
})
@login_required
@only_admins
@permission_required('group_app.view_group', (Group, 'pk', 'gid'))
@permission_required('abonapp.delete_periodicpayforid')
def del_periodic_pay(request, gid: int, uname, periodic_pay_id):
periodic_pay_instance = get_object_or_404(
models.PeriodicPayForId,
pk=periodic_pay_id
)
if periodic_pay_instance.account.username != uname:
uname = periodic_pay_instance.account.username
periodic_pay_instance.delete()
messages.success(request, _('Periodic pay successfully deleted'))
return redirect('abonapp:abon_services', gid, uname)
class EditSibscriberMarkers(LoginAdminPermissionMixin, UpdateView):
permission_required = 'abonapp.change_abon'
http_method_names = ('get', 'post')
template_name = 'abonapp/modal_user_markers.html'
form_class = forms.MarkersForm
model = models.Abon
slug_url_kwarg = 'uname'
slug_field = 'username'
def dispatch(self, request, *args, **kwargs):
try:
return super(EditSibscriberMarkers, self).dispatch(
request, *args, **kwargs
)
except ValidationError as e:
messages.error(request, e)
return self.render_to_response(self.get_context_data())
def get_context_data(self, **kwargs):
context = super(EditSibscriberMarkers, self).get_context_data(**kwargs)
context['gid'] = self.kwargs.get('gid')
context['uname'] = self.kwargs.get('uname')
return context
def form_invalid(self, form):
messages.error(self.request, _('fix form errors'))
return super(EditSibscriberMarkers, self).form_invalid(form)
def form_valid(self, form):
v = super(EditSibscriberMarkers, self).form_valid(form)
messages.success(
self.request,
            _('User flags have been changed successfully')
)
return v
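# Free the subscriber's IP lease and send a 'remove' command for them to the NAS.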
class UserSessionFree(LoginRequiredMixin, OnlyAdminsMixin, PermissionRequiredMixin, UpdateView):
permission_required = 'abonapp.change_abon'
model = models.Abon
slug_url_kwarg = 'uname'
slug_field = 'username'
fields = 'ip_address',
template_name = 'abonapp/modal_confirm_ip_free.html'
def get(self, request, *args, **kwargs):
r = super().get(request, *args, **kwargs)
abon = self.object
if abon.nas is None:
messages.error(self.request, _('gateway required'))
return redirect(
'abonapp:abon_home',
gid=self.kwargs.get('gid'),
uname=self.kwargs.get('uname')
)
return r
def post(self, request, *args, **kwargs):
self.object = self.get_object()
abon = self.object
try:
if abon.ip_address:
abon.free_ip_addr()
customer_nas_command.delay(abon.pk, 'remove')
messages.success(request, _('Ip lease has been freed'))
else:
                messages.error(request, _('User does not have an IP address'))
except OperationalError as e:
messages.error(request, e)
return redirect(
'abonapp:abon_home',
gid=self.kwargs.get('gid'),
uname=self.kwargs.get('uname')
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'gid': self.kwargs.get('gid'),
})
return context
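# Bulk-assign the selected NAS (gateway) to every subscriber of the group.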
@login_required
@only_admins
@permission_required('abonapp.change_abon')
def attach_nas(request, gid):
if request.method == 'POST':
gateway_id = lib.safe_int(request.POST.get('gateway'))
if gateway_id:
nas = get_object_or_404(NASModel, pk=gateway_id)
customers = models.Abon.objects.filter(group__id=gid)
if customers.exists():
customers.update(nas=nas)
messages.success(
request,
_('Network access server for users in this '
                    'group has been updated')
)
return redirect('abonapp:group_list')
else:
messages.warning(request, _('Users not found'))
else:
            messages.error(request, _('You must select a gateway'))
return render(request, 'abonapp/modal_attach_nas.html', {
'gid': gid,
'nas_list': NASModel.objects.all().iterator()
})
# API's
@login_required
@only_admins
@json_view
def abons(request):
ablist = ({
'id': abn.pk,
'tarif_id': abn.active_tariff().tariff.pk
if abn.active_tariff() is not None else 0,
'ip': abn.ip_address
} for abn in models.Abon.objects.iterator())
tarlist = ({
'id': trf.pk,
'speedIn': trf.speedIn,
'speedOut': trf.speedOut
} for trf in Tariff.objects.all())
data = {
'subscribers': ablist,
'tariffs': tarlist
}
del ablist, tarlist
return data
@login_required
@only_admins
@json_view
def search_abon(request):
word = request.GET.get('s')
if not word:
return None
results = models.Abon.objects.filter(fio__icontains=word)[:8]
return list(
{'id': usr.pk, 'text': "%s: %s" % (usr.username, usr.fio)}
for usr in results
)
class DhcpLever(SecureApiView):
#
# Api view for dhcp event
#
http_method_names = ('get',)
@method_decorator(json_view)
def get(self, request, *args, **kwargs):
data = request.GET.copy()
try:
r = self.on_dhcp_event(data)
if r is not None:
return {'text': r}
return {'status': 'ok'}
except IntegrityError as e:
return {'status': str(e).replace('\n', ' ')}
@staticmethod
def on_dhcp_event(data: Dict) -> Optional[str]:
"""
:param data = {
'client_ip': ip_address('127.0.0.1'),
'client_mac': 'aa:bb:cc:dd:ee:ff',
'switch_mac': 'aa:bb:cc:dd:ee:ff',
'switch_port': 3,
'cmd': 'commit'
}"""
try:
action = data.get('cmd')
if action is None:
return '"cmd" parameter is missing'
client_ip = data.get('client_ip')
if client_ip is None:
return '"client_ip" parameter is missing'
if action == 'commit':
return dhcp_commit(
client_ip, data.get('client_mac'),
data.get('switch_mac'), data.get('switch_port')
)
elif action == 'expiry':
return dhcp_expiry(client_ip)
elif action == 'release':
return dhcp_release(client_ip)
else:
return '"cmd" parameter is invalid: %s' % action
except lib.LogicError as e:
print('LogicError', e)
return str(e)
except lib.DuplicateEntry as e:
print('Duplicate:', e)
return str(e)
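# --- Usage sketch (illustrative only, not part of the app) ------------------
# on_dhcp_event() returns None on success and an error string otherwise,
# which DhcpLever.get() wraps into {'status': 'ok'} / {'text': ...}.
# The payload keys follow the docstring above; the sample values are made up.
def _example_dhcp_commit():
    payload = {
        'client_ip': '10.0.0.17',
        'client_mac': 'aa:bb:cc:dd:ee:ff',
        'switch_mac': '11:22:33:44:55:66',
        'switch_port': 3,
        'cmd': 'commit',
    }
    error = DhcpLever.on_dhcp_event(payload)
    return {'text': error} if error is not None else {'status': 'ok'}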
class PayHistoryListView(LoginAdminPermissionMixin, OrderedFilteredList):
permission_required = 'group_app.view_group'
context_object_name = 'pay_history'
template_name = 'abonapp/payHistory.html'
def get_permission_object(self):
if hasattr(self, 'abon'):
return self.abon.group
return Group.objects.filter(pk=self.kwargs.get('gid')).first()
def get_queryset(self):
abon = get_object_or_404(models.Abon,
username=self.kwargs.get('uname'))
self.abon = abon
pay_history = models.AbonLog.objects.filter(abon=abon).order_by('-date')
return pay_history
def get_context_data(self, **kwargs):
context = {
'group': self.abon.group,
'abon': self.abon
}
context.update(kwargs)
return super(PayHistoryListView, self).get_context_data(**context)
|
|
# *-* coding: utf-8 *-*
# This file is part of butterfly
#
# butterfly Copyright(C) 2015-2017 Florian Mounier
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import struct
import sys
import time
from collections import defaultdict
from mimetypes import guess_type
from uuid import uuid4
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.process
import tornado.web
import tornado.websocket
from butterfly import Route, url, utils
from butterfly.terminal import Terminal
def u(s):
if sys.version_info[0] == 2:
return s.decode('utf-8')
return s
@url(r'/(?:session/(?P<session>[^/]+)/?)?')
class Index(Route):
def get(self, session):
user = self.request.query_arguments.get(
'user', [b''])[0].decode('utf-8')
if not tornado.options.options.unsecure and user:
raise tornado.web.HTTPError(400)
return self.render(
'index.html', session=session or str(uuid4()))
@url(r'/theme/([^/]+)/style.css')
class Theme(Route):
def get(self, theme):
self.log.info('Getting style')
try:
import sass
sass.CompileError
except Exception:
self.log.error(
'You must install libsass to use sass '
'(pip install libsass)')
return
base_dir = self.get_theme_dir(theme)
style = None
for ext in ['css', 'scss', 'sass']:
probable_style = os.path.join(base_dir, 'style.%s' % ext)
if os.path.exists(probable_style):
style = probable_style
if not style:
raise tornado.web.HTTPError(404)
sass_path = os.path.join(
os.path.dirname(__file__), 'sass')
css = None
try:
css = sass.compile(filename=style, include_paths=[
base_dir, sass_path])
except sass.CompileError:
self.log.error(
'Unable to compile style (filename: %s, paths: %r) ' % (
style, [base_dir, sass_path]), exc_info=True)
if not css:
raise tornado.web.HTTPError(500)
self.log.debug('Style ok')
self.set_header("Content-Type", "text/css")
self.write(css)
self.finish()
@url(r'/theme/([^/]+)/(.+)')
class ThemeStatic(Route):
def get(self, theme, name):
if '..' in name:
raise tornado.web.HTTPError(403)
base_dir = self.get_theme_dir(theme)
fn = os.path.normpath(os.path.join(base_dir, name))
if not fn.startswith(base_dir):
raise tornado.web.HTTPError(403)
if os.path.exists(fn):
type = guess_type(fn)[0]
if type is None:
# Fallback when the system mimetypes database has no entry
type = {
'png': 'image/png',
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'gif': 'image/gif',
'woff': 'application/font-woff',
'ttf': 'application/x-font-ttf'
}.get(fn.split('.')[-1], 'text/plain')
self.set_header("Content-Type", type)
with open(fn, 'rb') as s:
while True:
data = s.read(16384)
if data:
self.write(data)
else:
break
self.finish()
raise tornado.web.HTTPError(404)
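# --- Sketch: content-type guessing with a manual fallback -------------------
# Mirrors the logic above: try the standard mimetypes database first (guess_type
# is imported at the top of this module) and only fall back to a small
# extension map when it returns nothing. Standalone illustration only.
_FALLBACK_TYPES = {
    'png': 'image/png', 'jpg': 'image/jpeg', 'jpeg': 'image/jpeg',
    'gif': 'image/gif', 'woff': 'application/font-woff',
    'ttf': 'application/x-font-ttf',
}

def _example_content_type(filename):
    mime = guess_type(filename)[0]
    if mime is None:
        mime = _FALLBACK_TYPES.get(filename.rsplit('.', 1)[-1], 'text/plain')
    return mime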
class KeptAliveWebSocketHandler(tornado.websocket.WebSocketHandler):
keepalive_timer = None
def open(self, *args, **kwargs):
self.keepalive_timer = tornado.ioloop.PeriodicCallback(
self.send_ping, tornado.options.options.keepalive_interval * 1000)
self.keepalive_timer.start()
def send_ping(self):
t = int(time.time())
frame = struct.pack('<I', t) # A ping frame based on time
self.log.info("Sending ping frame %s" % t)
try:
self.ping(frame)
except tornado.websocket.WebSocketClosedError:
self.keepalive_timer.stop()
def on_close(self):
if self.keepalive_timer is not None:
self.keepalive_timer.stop()
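# --- Sketch: reading the keep-alive ping payload -----------------------------
# send_ping() above packs the current UNIX time as a little-endian unsigned
# 32-bit int. A handler that also overrides on_pong() could decode it like
# this to estimate round-trip time; this is an illustration/assumption, not
# something butterfly itself does.
def _example_on_pong(data):
    (sent_at,) = struct.unpack('<I', data)
    return int(time.time()) - sent_at  # seconds since the ping was sent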
@url(r'/ctl/session/(?P<session>[^/]+)')
class TermCtlWebSocket(Route, KeptAliveWebSocketHandler):
sessions = defaultdict(list)
sessions_secure_users = {}
def open(self, session):
super(TermCtlWebSocket, self).open(session)
self.session = session
self.closed = False
self.log.info('Websocket /ctl opened %r' % self)
def create_terminal(self):
socket = utils.Socket(self.ws_connection.stream.socket)
user = self.request.query_arguments.get(
'user', [b''])[0].decode('utf-8')
path = self.request.query_arguments.get(
'path', [b''])[0].decode('utf-8')
secure_user = None
if not tornado.options.options.unsecure:
user = utils.parse_cert(
self.ws_connection.stream.socket.getpeercert())
assert user, 'No user in certificate'
try:
user = utils.User(name=user)
except LookupError:
raise Exception('Invalid user in certificate')
# Certificate authed user
secure_user = user
elif socket.local and socket.user == utils.User() and not user:
# Local to local returning browser user
secure_user = socket.user
elif user:
try:
user = utils.User(name=user)
except LookupError:
raise Exception('Invalid user')
if secure_user:
user = secure_user
if self.session in self.sessions and self.session in (
self.sessions_secure_users):
if user.name != self.sessions_secure_users[self.session]:
# Restrict to authorized users
raise tornado.web.HTTPError(403)
else:
self.sessions_secure_users[self.session] = user.name
self.sessions[self.session].append(self)
terminal = Terminal.sessions.get(self.session)
# Handling terminal session
if terminal:
TermWebSocket.last.write_message(terminal.history)
# And returning, we don't want another terminal
return
# New session, opening terminal
terminal = Terminal(
user, path, self.session, socket,
self.request.full_url().replace('/ctl/', '/'), self.render_string,
TermWebSocket.broadcast)
terminal.pty()
self.log.info('Opening session %s for secure user %r' % (
self.session, user))
@classmethod
def broadcast(cls, session, message, emitter=None):
for wsocket in cls.sessions[session]:
try:
if wsocket != emitter:
wsocket.write_message(message)
except Exception:
wsocket.log.exception('Error on broadcast')
wsocket.close()
def on_message(self, message):
cmd = json.loads(message)
if cmd['cmd'] == 'open':
self.create_terminal()
else:
try:
Terminal.sessions[self.session].ctl(cmd)
except Exception:
# FF strange bug
pass
self.broadcast(self.session, message, self)
def on_close(self):
super(TermCtlWebSocket, self).on_close()
if self.closed:
return
self.closed = True
self.log.info('Websocket /ctl closed %r' % self)
if self in self.sessions[self.session]:
self.sessions[self.session].remove(self)
if tornado.options.options.one_shot or (
getattr(self.application, 'systemd', False) and
not sum([
len(wsockets)
for session, wsockets in self.sessions.items()])):
sys.exit(0)
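# --- Sketch: the user-resolution order used by create_terminal() ------------
# Summarised as a plain function for clarity (illustrative only): a TLS client
# certificate wins, then a local browser connecting as the same UNIX user the
# server runs as, then an explicit ?user= query argument; anything else stays
# anonymous. Returns (user, is_secure).
def _example_resolve_user(cert_user, local_same_user, requested_user):
    if cert_user:                               # certificate-authenticated
        return cert_user, True
    if local_same_user and not requested_user:  # local-to-local, same user
        return local_same_user, True
    if requested_user:                          # explicit, unauthenticated
        return requested_user, False
    return None, False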
@url(r'/ws/session/(?P<session>[^/]+)')
class TermWebSocket(Route, KeptAliveWebSocketHandler):
# List of websockets per session
sessions = defaultdict(list)
# Last is kept for session shared history send
last = None
# Session history
history = {}
def open(self, session):
super(TermWebSocket, self).open(session)
self.set_nodelay(True)
self.session = session
self.closed = False
self.sessions[session].append(self)
self.__class__.last = self
self.log.info('Websocket /ws opened %r' % self)
@classmethod
def close_session(cls, session):
wsockets = (cls.sessions.get(session, []) +
TermCtlWebSocket.sessions.get(session, []))
for wsocket in wsockets:
wsocket.on_close()
wsocket.close()
if session in cls.sessions:
del cls.sessions[session]
if session in TermCtlWebSocket.sessions_secure_users:
del TermCtlWebSocket.sessions_secure_users[session]
if session in TermCtlWebSocket.sessions:
del TermCtlWebSocket.sessions[session]
@classmethod
def broadcast(cls, session, message, emitter=None):
if message is None:
cls.close_session(session)
return
wsockets = cls.sessions.get(session)
for wsocket in wsockets:
try:
if wsocket != emitter:
wsocket.write_message(message)
except Exception:
wsocket.log.exception('Error on broadcast')
wsocket.close()
def on_message(self, message):
Terminal.sessions[self.session].write(message)
def on_close(self):
super(TermWebSocket, self).on_close()
if self.closed:
return
self.closed = True
self.log.info('Websocket /ws closed %r' % self)
self.sessions[self.session].remove(self)
@url(r'/sessions/list.json')
class SessionsList(Route):
"""Get the theme list"""
def get(self):
if tornado.options.options.unsecure:
raise tornado.web.HTTPError(403)
cert = self.request.get_ssl_certificate()
user = utils.parse_cert(cert)
if not user:
raise tornado.web.HTTPError(403)
self.set_header('Content-Type', 'application/json')
self.write(tornado.escape.json_encode({
'sessions': sorted(
TermWebSocket.sessions),
'user': user
}))
@url(r'/themes/list.json')
class ThemesList(Route):
"""Get the theme list"""
def get(self):
if os.path.exists(self.themes_dir):
themes = [
theme
for theme in os.listdir(self.themes_dir)
if os.path.isdir(os.path.join(self.themes_dir, theme)) and
not theme.startswith('.')]
else:
themes = []
if os.path.exists(self.builtin_themes_dir):
builtin_themes = [
'built-in-%s' % theme
for theme in os.listdir(self.builtin_themes_dir)
if os.path.isdir(os.path.join(
self.builtin_themes_dir, theme)) and
not theme.startswith('.')]
else:
builtin_themes = []
self.set_header('Content-Type', 'application/json')
self.write(tornado.escape.json_encode({
'themes': sorted(themes),
'builtin_themes': sorted(builtin_themes),
'dir': self.themes_dir
}))
@url('/local.js')
class LocalJsStatic(Route):
def get(self):
self.set_header("Content-Type", 'application/javascript')
if os.path.exists(self.local_js_dir):
for fn in os.listdir(self.local_js_dir):
if not fn.endswith('.js'):
continue
with open(os.path.join(self.local_js_dir, fn), 'rb') as s:
while True:
data = s.read(16384)
if data:
self.write(data)
else:
self.write(';')
break
self.finish()
|
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bisect
import itertools
import networkx
from Queue import Queue
from pydot import Dot
from pydot import Edge
from pydot import Node
from barf.core.reil import DualInstruction
from barf.core.reil import ReilMnemonic
from barf.core.reil import ReilImmediateOperand
# CFG recovery mode
BARF_DISASM_LINEAR = 0 # linear sweep
BARF_DISASM_RECURSIVE = 1 # recursive descent
BARF_DISASM_MIXED = 2 # linear sweep + recursive descent
verbose = False
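# --- Sketch: how the recovery mode drives the worklist -----------------------
# Illustrative only (see BasicBlockBuilder._find_candidate_bbs below for the
# real implementation): linear sweep keeps queueing the address right after the
# current block, recursive descent queues the branch targets, and the mixed
# mode does both.
def _example_seed_worklist(mode, fall_through_addr, branch_targets, worklist):
    if mode in (BARF_DISASM_LINEAR, BARF_DISASM_MIXED):
        worklist.append(fall_through_addr)   # address right after the block
    if mode in (BARF_DISASM_RECURSIVE, BARF_DISASM_MIXED):
        worklist.extend(branch_targets)      # taken / not-taken / direct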
class BasicBlock(object):
"""Basic block representation.
"""
def __init__(self):
# List of instruction within the basic block. Each instruction
# is a 'dual' instruction, i.e. it pairs an assembler
# instruction with its REIL translation.
self._instrs = []
# Start address of the basic block.
self._address = None
# Taken branch address. If a basic block ends in a conditional
# instruction, this field has the address of the taken branch
# (condition equals True)
self._taken_branch = None
# Similar to taken branch but it holds the target address of
# the jump when the condition is false.
self._not_taken_branch = None
# If a basic block ends in a direct jump or in an instruction
# different from a conditional jump, this field holds the
# address of the jump or next instruction.
self._direct_branch = None
@property
def instrs(self):
"""Get basic block instructions.
"""
return self._instrs
@property
def address(self):
"""Get basic block start address.
"""
if self._instrs == []:
return None
return self._instrs[0].address
@property
def start_address(self):
"""Get basic block start address.
"""
if not self._instrs:
return None
return self._instrs[0].address
@property
def end_address(self):
"""Get basic block end address.
"""
if not self._instrs:
return None
return self._instrs[-1].address + self._instrs[-1].asm_instr.size - 1
@property
def size(self):
"""Get basic block size.
"""
if not self._instrs:
return None
return sum([dinstr.asm_instr.size for dinstr in self._instrs])
@property
def taken_branch(self):
"""Get basic block taken branch.
"""
return self._taken_branch
@taken_branch.setter
def taken_branch(self, value):
"""Set basic block taken branch.
"""
self._taken_branch = value
@property
def not_taken_branch(self):
"""Get basic block not taken branch.
"""
return self._not_taken_branch
@not_taken_branch.setter
def not_taken_branch(self, value):
"""Set basic block not taken branch.
"""
self._not_taken_branch = value
@property
def direct_branch(self):
"""Get basic block direct branch.
"""
return self._direct_branch
@direct_branch.setter
def direct_branch(self, value):
"""Set basic block direct branch.
"""
self._direct_branch = value
@property
def branches(self):
"""Get basic block branches.
"""
branches = []
if self._taken_branch:
branches += [(self._taken_branch, 'taken')]
if self._not_taken_branch:
branches += [(self._not_taken_branch, 'not-taken')]
if self._direct_branch:
branches += [(self._direct_branch, 'direct')]
return branches
def contains(self, address):
"""Check if an address is within the range of a basic block.
"""
return address >= self.address and address <= self.end_address
def empty(self):
"""Check if a basic block is empty.
"""
return len(self._instrs) == 0
def __str__(self):
lines = ["Basic Block @ 0x%08x" % (self.address if self.address else 0)]
for instr in self._instrs:
lines += [" %s ; %s" % (str(instr.ir_instrs[0]).ljust(25), str(instr.asm_instr))]
for ir_instr in instr.ir_instrs[1:]:
lines += [" %s" % str(ir_instr)]
return "\n".join(lines)
def __eq__(self, other):
# Assumes that you are comparing basic blocks from the same binary
return self.address == other.address and self.end_address == other.end_address
def __ne__(self, other):
return not self.__eq__(other)
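# --- Usage sketch -------------------------------------------------------------
# Illustrative only: DualInstruction objects normally come from the
# disassembler/translator; a tiny stand-in with the same attributes
# (address, asm_instr.size, ir_instrs) is enough to show how address, size,
# end_address and contains() behave.
def _example_basic_block():
    class _FakeAsm(object):
        def __init__(self, size):
            self.size = size

    class _FakeDual(object):
        def __init__(self, address, size):
            self.address = address
            self.asm_instr = _FakeAsm(size)
            self.ir_instrs = []

    bb = BasicBlock()
    bb.instrs.append(_FakeDual(0x400000, 2))
    bb.instrs.append(_FakeDual(0x400002, 3))
    assert bb.address == 0x400000
    assert bb.end_address == 0x400004   # last address + last size - 1
    assert bb.size == 5
    assert bb.contains(0x400003)
    return bb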
class BasicBlockGraph(object):
"""Basic block graph representation.
"""
def __init__(self, basic_blocks):
# List of basic blocks.
self._basic_blocks = basic_blocks
# Basic block accessed by address
self._bb_by_addr = dict([(bb.address, bb) for bb in basic_blocks])
# Basic block graph
self._graph = self._build_graph(basic_blocks)
def all_simple_bb_paths(self, start_address, end_address):
"""Return a list of path between start and end address.
"""
bb_start = self._find_basic_block(start_address)
bb_end = self._find_basic_block(end_address)
paths = networkx.all_simple_paths(self._graph, \
source=bb_start.address, target=bb_end.address)
return (map(lambda addr : self._bb_by_addr[addr], path) for path in paths)
def save(self, filename, print_ir=False, format='dot'):
"""Save basic block graph into a file.
"""
node_format = {
'shape' : 'Mrecord',
'rankdir' : 'LR',
'fontname' : 'monospace',
'fontsize' : '9.0'
}
edge_format = {
'fontname' : 'monospace',
'fontsize' : '8.0'
}
edge_colors = {
'taken' : 'green',
'not-taken' : 'red',
'direct' : 'blue'
}
try:
# for each connected component
for idx, gr in enumerate(networkx.connected_component_subgraphs(self._graph.to_undirected())):
graph = Dot(graph_type="digraph", rankdir="TB")
# add nodes
nodes = {}
for bb_addr in gr.node.keys():
dump = self._dump_bb(self._bb_by_addr[bb_addr], print_ir)
# html-encode characters that would break the dot record label
dump = dump.replace("!", "&#33;")
dump = dump.replace("#", "&#35;")
dump = dump.replace(":", "&#58;")
dump = dump.replace("{", "&#123;")
dump = dump.replace("}", "&#125;")
label = "{<f0> 0x%08x | %s}" % (bb_addr, dump)
nodes[bb_addr] = Node(bb_addr, label=label, **node_format)
graph.add_node(nodes[bb_addr])
# add edges
for bb_src_addr in gr.node.keys():
for bb_dst_addr, branch_type in self._bb_by_addr[bb_src_addr].branches:
graph.add_edge(Edge(nodes[bb_src_addr],
nodes[bb_dst_addr], label=branch_type, \
color=edge_colors[branch_type], **edge_format))
graph.write("%s_%03d.%s" % (filename, idx, format), format=format)
except Exception as err:
import traceback
import sys
print("[E] Error loading BARF (%s:%d) : '%s'" %
(__name__, sys.exc_traceback.tb_lineno, str(err)))
print("")
print(traceback.format_exc())
# Auxiliary functions
# ======================================================================== #
def _build_graph(self, basic_blocks):
graph = networkx.DiGraph()
# add nodes
for bb_addr in self._bb_by_addr.keys():
graph.add_node(bb_addr, address=bb_addr)
# add edges
for bb_src_addr in self._bb_by_addr.keys():
for bb_dst_addr, branch_type in self._bb_by_addr[bb_src_addr].branches:
graph.add_edge(bb_src_addr, bb_dst_addr, branch_type=branch_type)
return graph
def _find_basic_block(self, address):
bb_rv = None
for bb in self._basic_blocks:
if address >= bb.address and address <= bb.end_address:
bb_rv = bb
break
return bb_rv
def _dump_bb(self, basic_block, print_ir=False):
lines = []
base_addr = basic_block.instrs[0].address
for instr in basic_block.instrs:
lines += ["0x%08x (%2d) " % (instr.address, instr.asm_instr.size) + str(instr.asm_instr) + "\\l"]
# lines += ["+%02x " % (instr.address - base_addr) + str(instr.asm_instr) + "\\l"]
# lines += [str(instr.asm_instr) + "\\l"]
if print_ir:
for ir_instr in instr.ir_instrs:
lines += [" " + str(ir_instr) + "\\l"]
return "".join(lines)
@property
def basic_blocks(self):
return self._basic_blocks
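# --- Sketch: the underlying networkx calls ------------------------------------
# _build_graph() stores one node per basic-block start address and one edge per
# branch; all_simple_bb_paths() then delegates to networkx. A standalone
# illustration with plain addresses (no BasicBlock objects involved):
def _example_simple_paths():
    g = networkx.DiGraph()
    g.add_edge(0x1000, 0x1010, branch_type='taken')
    g.add_edge(0x1000, 0x1020, branch_type='not-taken')
    g.add_edge(0x1010, 0x1030, branch_type='direct')
    g.add_edge(0x1020, 0x1030, branch_type='direct')
    # Two simple paths lead from 0x1000 to 0x1030.
    return list(networkx.all_simple_paths(g, source=0x1000, target=0x1030))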
class BasicBlockBuilder(object):
"""Basic block builder.
"""
def __init__(self, disassembler, memory, translator):
# An instance of a disassembler.
self._disasm = disassembler
# An instance of a REIL translator.
self._ir_trans = translator
# Maximum number of bytes fetched from memory to disassemble.
self._lookahead_max = 16
# Memory of the program being analyzed.
self._mem = memory
def build(self, start_address, end_address):
"""Return the list of basic blocks.
Linear Sweep Disassembly.
@param start_address: Address of the first byte to start disassembling
basic blocks.
@param end_address: Address of the last byte (inclusive) to finish
disassembling basic blocks.
"""
if verbose:
print("[+] Recovering Basic Blocks :")
if verbose:
print(" Finding candidate BBs...")
bbs = self._find_candidate_bbs(start_address, end_address)
if verbose:
print(" %d" % len(bbs))
# print " Number of instrs..."
# asm_count = 0
# ir_count = 0
# for bb in bbs:
# asm_count += len(bb.instrs)
# ir_count += sum(map(lambda i : len(i.ir_instrs), bb.instrs))
# print " asm : %d" % asm_count
# print " ir : %d" % ir_count
if verbose:
print(" Refining BBs...")
bbs = self._refine_bbs(bbs)
if verbose:
print(" %d" % len(bbs))
# print " Checking gaps..."
# for curr, next in zip(bbs[:-1], bbs[1:]):
# if curr.address + curr.size != next.address:
# print "gap found @ %s" % hex(curr.address + curr.size)
if verbose:
print(" Stripping BBs...")
bbs = self._strip_bbs(bbs)
if verbose:
print(" %d" % len(bbs))
if verbose:
print(" Updating branches...")
self._update_branches(bbs)
if verbose:
print(" %d" % len(bbs))
return bbs
def _find_candidate_bbs(self, start_address, end_address, mode=BARF_DISASM_MIXED):
bbs = []
addrs_to_process = Queue()
addrs_processed = set()
addrs_to_process.put(start_address)
while not addrs_to_process.empty():
curr_addr = addrs_to_process.get()
# there is no standard way to check whether an item is already in
# the queue before pushing it, so check whether the popped
# address has already been processed.
if curr_addr in addrs_processed:
continue
# print "curr_addr : ", hex(curr_addr)
bb = self._disassemble_bb(curr_addr, end_address + 0x1)
if bb.empty():
# print " empty bb"
continue
# print " valid bb"
# add bb to the list
bbs += [bb]
addrs_processed.add(curr_addr)
# linear sweep mode: add next addr to process queue
if mode in [BARF_DISASM_LINEAR, BARF_DISASM_MIXED]:
next_addr = bb.address + bb.size
# print "next_addr : ", hex(next_addr)
if next_addr < end_address and not next_addr in addrs_processed:
addrs_to_process.put(next_addr)
# recursive descent mode: add branches to process queue
if mode in [BARF_DISASM_RECURSIVE, BARF_DISASM_MIXED]:
for addr, branch_type in bb.branches:
if not addr in addrs_processed:
addrs_to_process.put(addr)
return bbs
def _refine_bbs(self, bbs):
bbs.sort(key=lambda x : x.address)
bbs_addrs = map(lambda x : x.address, bbs)
bbs_new = []
for idx, bb1 in enumerate(bbs):
# sys.stdout.write("\r Processing : %d/%d" % (idx, len(bbs)))
# sys.stdout.flush()
bb_divided = False
lower = bisect.bisect_left(bbs_addrs, bb1.start_address)
upper = bisect.bisect_right(bbs_addrs, bb1.end_address)
for bb2 in bbs[lower:upper]:
if bb1.contains(bb2.address) and bb1 != bb2:
# print "split!!", hex(bb2.address)
bba = self._divide_bb(bb1, bb2.address)
if len(bba.instrs) > 0 and bba not in bbs_new:
bbs_new += [bba]
bb_divided = True
break
if not bb_divided:
if bb1 not in bbs_new:
bbs_new += [bb1]
return bbs_new
def _strip_bbs(self, bbs):
return [bb for bb in map(self._strip_bb, bbs) if len(bb.instrs) > 0]
def _update_branches(self, bbs):
bb_addrs = [bb.address for bb in bbs]
for bb in bbs:
if not bb.taken_branch in bb_addrs:
bb.taken_branch = None
if not bb.not_taken_branch in bb_addrs:
bb.not_taken_branch = None
if not bb.direct_branch in bb_addrs:
bb.direct_branch = None
def _strip_bb(self, bb):
# top
while len(bb.instrs) > 0:
if bb.instrs[0].ir_instrs[0].mnemonic == ReilMnemonic.NOP:
del bb.instrs[0]
else:
break
# bottom
while len(bb.instrs) > 0:
if bb.instrs[-1].ir_instrs[0].mnemonic == ReilMnemonic.NOP:
del bb.instrs[-1]
else:
break
return bb
def _divide_bb(self, bb, address):
bb_new = BasicBlock()
for dinstr in bb.instrs:
if dinstr.address == address:
break
bb_new.instrs.append(dinstr)
bb_new.direct_branch = address
return bb_new
def _disassemble_bb(self, start_address, end_address):
bb_current = BasicBlock()
if start_address > end_address:
return bb_current
addr = start_address
taken = None
not_taken = None
direct = None
while addr < end_address:
start, end = addr, min(addr + self._lookahead_max, end_address)
try:
data_chunk = self._mem[start:end]
except:
# TODO: Log error.
break
asm = self._disasm.disassemble(data_chunk, addr)
if not asm:
break
ir = self._ir_trans.translate(asm)
bb_current.instrs.append(DualInstruction(addr, asm, ir))
# if there is an 'end' instruction process it accordingly
if ir[-1].mnemonic == ReilMnemonic.RET:
break
# TODO: Manage 'call' instruction properly (without
# resorting to 'asm.mnemonic == "call"').
if ir[-1].mnemonic == ReilMnemonic.JCC and not asm.mnemonic == "call":
taken, not_taken, direct = self._extract_branches(addr, asm, asm.size, ir)
break
# if ir[-1].mnemonic == ReilMnemonic.JCC and asm.mnemonic == "call":
# direct_branch = addr + asm.size
# break
# update instruction pointer and iterate
addr += asm.size
bb_current.taken_branch = taken
bb_current.not_taken_branch = not_taken
bb_current.direct_branch = direct
# print "bb addr : ", hex(bb_current.address), " bb end addr : ", hex(bb_current.end_address)
# print " taken :", hex(taken) if taken else ""
# print " not_taken :", hex(not_taken) if not_taken else ""
# print " direct :", hex(direct) if direct else ""
return bb_current
def _resolve_branch_address(self, jmp_instr, instrs):
dst = jmp_instr.operands[2]
if isinstance(dst, ReilImmediateOperand):
# branch address is an immediate
# Transform Reil address back to source arch address
return dst.immediate >> 8
else:
# try to resolve branch address
for instr in instrs[::-1]:
if instr.mnemonic == ReilMnemonic.STR and \
isinstance(instr.operands[0], ReilImmediateOperand) and \
instr.dst == dst:
# Transform Reil address back to source arch address
return instr.operands[0].immediate >> 8
def _extract_branches(self, addr, asm, size, ir):
taken_branch = None
not_taken_branch = None
direct_branch = None
instr_last = ir[-1]
if instr_last.mnemonic == ReilMnemonic.JCC:
cond = instr_last.operands[0]
dst = instr_last.operands[2]
branch_addr = self._resolve_branch_address(instr_last, ir)
# set branch address according to its type
if isinstance(cond, ReilImmediateOperand):
if cond.immediate == 0x0:
taken_branch = addr + size
not_taken_branch = branch_addr
if cond.immediate == 0x1 and asm.mnemonic == 'call':
direct_branch = addr + size
if cond.immediate == 0x1 and asm.mnemonic != 'call':
direct_branch = branch_addr
else:
taken_branch = branch_addr
not_taken_branch = addr + size
return taken_branch, not_taken_branch, direct_branch
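# --- Sketch: REIL address <-> native address ----------------------------------
# _resolve_branch_address() recovers the native address with `immediate >> 8`.
# This assumes the usual BARF/REIL convention of packing the native address in
# the upper bits and the intra-instruction REIL index in the low byte; treat
# the helper below as an illustration of that convention, not library API.
def _example_reil_addresses(native_addr=0x8048a10):
    reil_addr_first = (native_addr << 8) | 0x00  # first REIL instr of the asm instr
    reil_addr_third = (native_addr << 8) | 0x02  # third REIL instr
    assert reil_addr_first >> 8 == native_addr
    assert reil_addr_third >> 8 == native_addr
    return reil_addr_first, reil_addr_third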
|
|
# Create your views here.
# import importlib
import sys
import urllib
import urllib2
import os
import mimetypes
from django.utils import simplejson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render_to_response
from django.template import RequestContext, loader, TemplateDoesNotExist
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.http import HttpResponse
from django.views.generic.list_detail import object_list
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import mail
from django.contrib.auth.decorators import login_required
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.template.loader import render_to_string
from zorna import defines
from zorna.acl.models import ACLPermission, get_acl_users_by_object
from zorna.site.email import ZornaEmail
from zorna.communities.models import EventCommunity, PollCommunity, PollCommunityChoice, UrlCommunity, PageCommunity
from zorna.communities.forms import CommunityAdminAddForm, MessageCommunityForm, EventCommunityForm, \
InviteCommunityForm, PollCommunityForm, PollCommunityChoiceForm, UrlCommunityForm, \
PageCommunityForm, EMBEDLY_RE
from zorna.utilit import get_upload_communities
from zorna.forms.forms import comforms_community, FormsForm
from zorna.communities.api import *
from zorna.forms.api import forms_get_entries
NB_MESSAGES_BY_PAGE = 20
NB_COMMUNITIES_BY_PAGE = 15
def CommunityPopupHttpResponseError(request, error):
context = RequestContext(request)
return render_to_response('communities/popup_error.html', {'title': _(u'Error'), 'error': error}, context_instance=context)
def get_community_addons_instance(request, community_id=0, all=True):
return CommunityAddons(request, community_id, all)
class zornapoll_community(ZornaCommunityAddons):
def get_title(self, id):
return _(u'Poll')
def get_content_type(self, id):
ct = ContentType.objects.get_for_model(PollCommunity)
return ct
def get_content_types(self):
ct = ContentType.objects.get_for_model(PollCommunity)
return [ct]
def get_content_type_id(self, id):
ct = ContentType.objects.get_by_natural_key(
'communities', 'pollcommunity')
return ct.pk
def get_css_file(self, id):
# return '/skins/%s/css/event.css' % settings.ZORNA_SKIN
return None
def get_message(self, id):
return None
def get_tabs(self, request, community_id=0):
ao = get_allowed_objects(request.user, Community, 'manage')
if len(ao):
return ['poll']
else:
return []
def get_communities(self, request, id):
return get_allowed_objects(request.user, Community, 'manage')
def get_users(self, request, id):
return []
def get_menus(self, request, community_id=0):
id = 'zornapoll_poll_menu'
return [{'title': _(u'Polls'), 'url': reverse('communities_home_plugin', args=(id,)), 'id': id, 'css_icon': 'polls'}]
def get_page_title(self, request, id):
return _(u'Polls')
def render_form_by_id(self, request, id, post=False):
if id == 'poll':
form = self.get_form(request, id, post)
return self.render_form(request, form)
else:
return ''
def render_form(self, request, form):
try:
if not form.instance.pk:
t = loader.get_template("communities/poll_form.html")
else:
t = loader.get_template("communities/poll_edit_form.html")
c = RequestContext(request, {'form_extra': form})
return t.render(c)
except:
return ''
def get_form(self, request, id, post=False, instance_id=None):
if instance_id:
try:
instance = PollCommunity.objects.get(pk=instance_id)
except PollCommunity.DoesNotExist:
instance = None
else:
instance = None
if post:
choice_form_set = modelformset_factory(
PollCommunityChoice, form=PollCommunityChoiceForm, extra=2)
if instance:
# don't let user update poll questions
poll_form_set = choice_form_set(
queryset=instance.pollcommunitychoice_set.all())
else:
poll_form_set = choice_form_set(request.POST)
k = poll_form_set.total_form_count()
if k == 0:
# call choice_form_set again with an empty queryset, otherwise
# the form is displayed without the "add" link
poll_form_set = choice_form_set(
queryset=PollCommunityChoice.objects.none())
form_poll = PollCommunityForm(
poll_form_set, request.POST, instance=instance)
else:
if instance:
choice_form_set = modelformset_factory(
PollCommunityChoice, form=PollCommunityChoiceForm, extra=2)
poll_form_set = choice_form_set(
queryset=PollCommunityChoice.objects.none())
form_poll = PollCommunityForm(poll_form_set, instance=instance)
else:
choice_form_set = modelformset_factory(
PollCommunityChoice, form=PollCommunityChoiceForm, extra=2)
poll_form_set = choice_form_set(
queryset=PollCommunityChoice.objects.none())
form_poll = PollCommunityForm(poll_form_set)
return form_poll
def save(self, request, id, form_poll, message=None):
poll = form_poll.save()
for i in range(0, form_poll.poll_form_set.total_form_count()):
form = form_poll.poll_form_set.forms[i]
try:
value = form.cleaned_data['choice']
if value:
choice = PollCommunityChoice(
choice=form.cleaned_data['choice'])
choice.question = poll
choice.save()
except:
pass
return poll
def render_message(self, request, poll):
t = loader.get_template('communities/poll_view.html')
c = RequestContext(request, {'url_frame': reverse(
'communities_poll_vote', args=(poll.pk,)), 'poll': poll})
return t.render(c)
def render_widget(self, request, id, community_id=0):
return '', ''
def render_page(self, request, id, context={}):
ct = ContentType.objects.get_for_model(PollCommunity)
# m =
# MessageCommunityExtra.objects.select_related().filter(content_type =
# ct).order_by('-message_time_updated')
community_id = context.get('community_id', 0)
messages = get_messages_extra_by_content_type(
request, ct, community_id)
extra = {}
for m in messages:
extra[m.object_id] = m
q = context.get('search_string', None)
if q:
polls = PollCommunity.objects.filter(Q(question__icontains=q) | Q(
pollcommunitychoice__choice__icontains=q), pk__in=extra.keys(),).distinct()
else:
polls = PollCommunity.objects.filter(pk__in=extra.keys())
q = []
for p in polls:
p.message = extra[p.pk].message
q.append({'html': self.render_message(
request, p), 'message': extra[p.pk].message, 'id': p.pk})
return q
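# --- Sketch: the formset pattern used in get_form() above ---------------------
# modelformset_factory() builds a formset class for PollCommunityChoice; an
# empty queryset yields only the `extra` blank forms, while request.POST binds
# the submitted ones. Illustrative only; the view code above is what actually
# wires the formset into PollCommunityForm.
def _example_choice_formset(post_data=None):
    ChoiceFormSet = modelformset_factory(
        PollCommunityChoice, form=PollCommunityChoiceForm, extra=2)
    if post_data is None:
        # unbound: two blank choice forms
        return ChoiceFormSet(queryset=PollCommunityChoice.objects.none())
    # bound: validate whatever the user submitted
    formset = ChoiceFormSet(post_data)
    formset.is_valid()
    return formset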
def communities_poll_vote(request, poll):
try:
poll = PollCommunity.objects.get(pk=poll)
except PollCommunity.DoesNotExist:
return HttpResponse('')
if request.method == 'POST':
answer = request.POST.get("answer", 0)
if answer:
choice = poll.pollcommunitychoice_set.get(pk=int(answer))
choice.user.add(request.user)
extra_context = {'poll': poll}
if poll.pollcommunitychoice_set.filter(user=request.user).count() == 0:
template = 'communities/poll_vote.html'
else:
extra_context['query'] = poll.pollcommunitychoice_set.values(
"choice").annotate(count=Count("user__pk"))
extra_context['total_votes'] = 0
for q in extra_context['query']:
extra_context['total_votes'] += q['count']
template = 'communities/poll_results.html'
context = RequestContext(request)
return render_to_response(template, extra_context, context_instance=context)
class zornaevent_community(ZornaCommunityAddons):
def get_title(self, id):
return _(u'Event')
def get_content_type(self, id):
ct = ContentType.objects.get_for_model(EventCommunity)
return ct
def get_content_types(self):
ct = ContentType.objects.get_for_model(EventCommunity)
return [ct]
def get_content_type_id(self, id):
ct = ContentType.objects.get_by_natural_key(
'communities', 'eventcommunity')
return ct.pk
def get_css_file(self, id):
# return '/skins/%s/css/event.css' % settings.ZORNA_SKIN
return None
def get_message(self, id):
return None
def get_communities(self, request, id):
return get_allowed_objects(request.user, Community, 'manage')
def get_users(self, request, id):
return []
def get_tabs(self, request, community_id=0):
ao = get_allowed_objects(request.user, Community, 'manage')
if len(ao):
return ['event']
else:
return []
def get_menus(self, request, community_id=0):
id = 'zornaevent_event_menu'
return [{'title': _(u'Events'), 'url': reverse('communities_home_plugin', args=(id,)), 'id': id, 'css_icon': 'events'}]
def get_page_title(self, request, id):
return _(u'Events')
def render_form_by_id(self, request, id, post=False):
if id == 'event':
form = self.get_form(request, id, post)
return self.render_form(request, form)
else:
return ''
def render_form(self, request, form):
try:
t = loader.get_template("communities/event_form.html")
c = RequestContext(request, {'form_extra': form})
return t.render(c)
except:
return ''
def get_form(self, request, id, post=False, instance_id=None):
if instance_id:
try:
instance = EventCommunity.objects.get(pk=instance_id)
except EventCommunity.DoesNotExist:
instance = None
else:
instance = None
if post:
form = EventCommunityForm(request.POST, instance=instance)
else:
if instance:
id = {'title': instance.event.title, 'start':
instance.event.start, 'end': instance.event.end}
form = EventCommunityForm(instance=instance, initial=id)
else:
form = EventCommunityForm(instance=instance)
return form
def save(self, request, id, form, message=None):
return form.save(request=request, message=message)
def render_message(self, request, assignment):
t = loader.get_template('communities/event_view.html')
c = RequestContext(request, {'msg': assignment})
return t.render(c)
def render_widget(self, request, id, community_id=0):
ct = ContentType.objects.get_for_model(EventCommunity)
# m =
# MessageCommunityExtra.objects.select_related().filter(content_type =
# ct).order_by('-message_time_updated')
messages = get_messages_extra_by_content_type(
request, ct, community_id)
messages = messages.order_by('-message__time_updated')
extra = {}
for m in messages:
extra[m.object_id] = m
m = EventCommunity.objects.filter(
pk__in=extra.keys()).order_by('event__start')[0:10]
for c in m:
c.message = extra[c.pk].message
if len(m):
t = loader.get_template('communities/event_widget.html')
c = RequestContext(request, {'assignements': m})
return _(u'Events'), t.render(c)
else:
return '', ''
def render_page(self, request, id, context={}):
ct = ContentType.objects.get_for_model(EventCommunity)
# m =
# MessageCommunityExtra.objects.select_related().filter(content_type =
# ct).order_by('-message_time_updated')
community_id = context.get('community_id', 0)
messages = get_messages_extra_by_content_type(
request, ct, community_id)
extra = {}
for m in messages:
extra[m.object_id] = m
q = context.get('search_string', None)
if q:
messages = EventCommunity.objects.filter(Q(event__title__icontains=q) | Q(
event__description__icontains=q), pk__in=extra.keys(),)
else:
messages = EventCommunity.objects.filter(pk__in=extra.keys())
messages = messages.order_by('event__start')
t = loader.get_template('communities/event_view.html')
q = []
for m in messages:
m.message = extra[m.pk].message
c = RequestContext(request, {'msg': m})
q.append({'html': t.render(
c), 'message': extra[m.pk].message, 'id': m.pk})
return q
class zornaurl_community(ZornaCommunityAddons):
def get_title(self, id):
return _(u'Link')
def get_content_type(self, id):
ct = ContentType.objects.get_for_model(UrlCommunity)
return ct
def get_content_types(self):
ct = ContentType.objects.get_for_model(UrlCommunity)
return [ct]
def get_content_type_id(self, id):
ct = ContentType.objects.get_by_natural_key(
'communities', 'urlcommunity')
return ct.pk
def get_css_file(self, id):
# return '/skins/%s/css/event.css' % settings.ZORNA_SKIN
return None
def get_message(self, id):
return None
def get_communities(self, request, id):
return get_allowed_objects(request.user, Community, 'manage')
def get_users(self, request, id):
return []
def get_tabs(self, request, community_id=0):
ao = get_allowed_objects(request.user, Community, 'manage')
if len(ao):
return ['link']
else:
return []
def get_menus(self, request, community_id=0):
id = 'zornaurl_url_menu'
return [{'title': _(u'Links'), 'url': reverse('communities_home_plugin', args=(id,)), 'id': id, 'css_icon': 'links'}]
def get_page_title(self, request, id):
return _(u'Links')
def render_form_by_id(self, request, id, post=False):
if id == 'url':
form = self.get_form(request, id, post)
return self.render_form(request, form)
else:
return ''
def render_form(self, request, form):
try:
t = loader.get_template("communities/url_form.html")
c = RequestContext(request, {'form_extra': form})
return t.render(c)
except:
return ''
def get_form(self, request, id, post=False, instance_id=None):
if instance_id:
try:
instance = UrlCommunity.objects.get(pk=instance_id)
except UrlCommunity.DoesNotExist:
instance = None
else:
instance = None
if post:
form = UrlCommunityForm(request.POST, instance=instance)
else:
form = UrlCommunityForm(instance=instance)
return form
def save(self, request, id, form, message=None):
return form.save(request=request, message=message)
def render_message(self, request, extra):
t = loader.get_template('communities/url_view.html')
context = {}
context['title'] = extra.title
context['description'] = extra.excerpt
context['url'] = extra.url
if settings.EMBEDLY_KEY and EMBEDLY_RE.search(extra.url):
api_url = 'http://api.embed.ly/1/oembed?'
# max_width = settings.EMBEDLY_MAX_WIDTH if
# settings.EMBEDLY_MAX_WIDTH else 500
params = {'url': extra.url, 'key':
settings.EMBEDLY_KEY, 'maxwidth': 500}
oembed_call = "%s%s" % (api_url, urllib.urlencode(params))
try:
embedly_info = simplejson.loads(
urllib2.urlopen(oembed_call).read())
context['description'] = embedly_info['description']
context['title'] = embedly_info['title']
except Exception as e:
embedly_info = False
else:
embedly_info = False
c = RequestContext(request, {
'msg': context, 'embedly_info': embedly_info})
return t.render(c)
def render_widget(self, request, id, community_id=0):
return '', ''
def render_page(self, request, id, context={}):
ct = ContentType.objects.get_for_model(UrlCommunity)
# m =
# MessageCommunityExtra.objects.select_related().filter(content_type =
# ct).order_by('-message_time_updated')
community_id = context.get('community_id', 0)
messages = get_messages_extra_by_content_type(
request, ct, community_id)
extra = {}
for m in messages:
extra[m.object_id] = m
q = context.get('search_string', None)
if q:
messages = UrlCommunity.objects.filter(Q(title__icontains=q) | Q(
excerpt__icontains=q), pk__in=extra.keys(),)
else:
messages = UrlCommunity.objects.filter(pk__in=extra.keys())
messages = messages.order_by('-time_created')
t = loader.get_template('communities/url_view.html')
q = []
for m in messages:
m.message = extra[m.pk].message
q.append({'html': self.render_message(
request, m), 'message': extra[m.pk].message, 'id': m.pk})
return q
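# --- Sketch: the embed.ly oEmbed call built in render_message() above ---------
# Illustrative only: the key below is a placeholder, and the endpoint and
# parameters (url, key, maxwidth) mirror the ones used in render_message().
def _example_oembed_url(url, key='YOUR-EMBEDLY-KEY', maxwidth=500):
    params = {'url': url, 'key': key, 'maxwidth': maxwidth}
    return 'http://api.embed.ly/1/oembed?' + urllib.urlencode(params)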
class zornapage_community(ZornaCommunityAddons):
def get_title(self, id):
return _(u'Page')
def get_content_type(self, id):
ct = ContentType.objects.get_for_model(PageCommunity)
return ct
def get_content_types(self):
ct = ContentType.objects.get_for_model(PageCommunity)
return [ct]
def get_content_type_id(self, id):
ct = ContentType.objects.get_by_natural_key(
'communities', 'pagecommunity')
return ct.pk
def get_css_file(self, id):
# return '/skins/%s/css/event.css' % settings.ZORNA_SKIN
return None
def get_message(self, id):
return None
def get_communities(self, request, id):
return get_allowed_objects(request.user, Community, 'manage')
def get_users(self, request, id):
return []
def get_tabs(self, request, community_id=0):
ao = get_allowed_objects(request.user, Community, 'manage')
if len(ao):
return ['page']
else:
return []
def get_menus(self, request, community_id=0):
id = 'zornapage_page_menu'
return [{'title': _(u'Pages'), 'url': reverse('communities_home_plugin', args=(id,)), 'id': id, 'css_icon': 'pages'}]
def get_page_title(self, request, id):
return _(u'Pages')
def render_form_by_id(self, request, id, post=False):
if id == 'page':
form = self.get_form(request, id, post)
return self.render_form(request, form)
else:
return ''
def render_form(self, request, form):
try:
t = loader.get_template("communities/page_form.html")
c = RequestContext(request, {'form_extra': form})
return t.render(c)
except:
return ''
def get_form(self, request, id, post=False, instance_id=None):
if instance_id:
try:
instance = PageCommunity.objects.get(pk=instance_id)
except PageCommunity.DoesNotExist:
instance = None
else:
instance = None
if post:
form = PageCommunityForm(request.POST, instance=instance, request=request)
else:
form = PageCommunityForm(instance=instance, request=request)
return form
def save(self, request, id, form, message=None):
return form.save()
def render_message(self, request, extra):
t = loader.get_template('communities/page_view.html')
context = {}
context['title'] = extra.title
context['body'] = extra.body
c = RequestContext(request, {'msg': context})
return t.render(c)
def render_widget(self, request, id, community_id=0):
return '', ''
def render_page(self, request, id, context={}):
ct = ContentType.objects.get_for_model(PageCommunity)
# m =
# MessageCommunityExtra.objects.select_related().filter(content_type =
# ct).order_by('-message_time_updated')
community_id = context.get('community_id', 0)
messages = get_messages_extra_by_content_type(
request, ct, community_id)
extra = {}
for m in messages:
extra[m.object_id] = m
q = context.get('search_string', None)
if q:
messages = PageCommunity.objects.filter(
Q(body__icontains=q), pk__in=extra.keys(),)
else:
messages = PageCommunity.objects.filter(pk__in=extra.keys())
messages = messages.order_by('-time_created')
t = loader.get_template('communities/page_view.html')
q = []
for m in messages:
m.message = extra[m.pk].message
q.append({'html': self.render_message(
request, m), 'message': extra[m.pk].message, 'id': m.pk})
return q
class CommunityAddons(object):
plugins = {}
content_types = {}
_populated = False
def __init__(self, request, community_id=0, all=False):
self.id_by_content_type = {}
self.request = request
self.community_id = community_id
if all and not CommunityAddons._populated:
self.register_addon(zornaurl_community())
self.register_addon(zornaevent_community())
self.register_addon(zornapoll_community())
self.register_addon(zornapage_community())
try:
if settings.ZORNA_COMMUNITY_FORMS:
self.register_addon(comforms_community(request))
except:
pass
self.get_instances()
self.tabs = {}
self.plugins_menus = []
self.plugins_tabs = []
for p in CommunityAddons.plugins.values():
tabs = p.get_tabs(request, community_id)
for t in tabs:
tab = self.format_tab(p, t)
self.plugins_tabs.append(tab)
self.tabs[tab['id']] = p
self.plugins_menus.extend(p.get_menus(request, community_id))
def register_addon(self, instance):
app = instance.__class__.__name__.split('_')[0]
CommunityAddons.plugins[app] = instance
cts = instance.get_content_types()
for ct in cts:
CommunityAddons.content_types[ct.pk] = instance
def get_tab_id(self, instance, tab):
app = instance.__class__.__name__.split('_')[0]
ct = instance.get_content_type(tab)
return '%s_%s_%s_%s_tab' % (app, ct.app_label, ct.model, tab)
def format_tab(self, instance, tab):
ct = instance.get_content_type(tab)
id = self.get_tab_id(instance, tab)
self.id_by_content_type[ct.pk] = id
return {'id': id, 'title': instance.get_title(tab), 'css_file': instance.get_css_file(tab)}
def load_app(self, app):
try:
__import__('zorna_plugins.%s.zorna_community' % app)
b = sys.modules['zorna_plugins.%s.zorna_community' % app]
obj = getattr(b, '%s_community' % app)
self.register_addon(obj())
except:
pass
def get_instances(self):
if CommunityAddons._populated:
return CommunityAddons.plugins.values()
else:
CommunityAddons._populated = True
plugins_path = os.path.join(settings.PROJECT_PATH, 'zorna_plugins')
for app in os.listdir(plugins_path):
app_path = os.path.join(plugins_path, app)
if os.path.isdir(app_path) and not CommunityAddons.plugins.has_key(app):
self.load_app(app)
return CommunityAddons.plugins.values()
def get_instance_by_name(self, name):
try:
return CommunityAddons.plugins[name]
except KeyError:
try:
plugins_path = os.path.join(
settings.PROJECT_PATH, 'zorna_plugins')
for app in os.listdir(plugins_path):
if app == name:
app_path = os.path.join(plugins_path, app)
if os.path.isdir(app_path):
if not CommunityAddons.plugins.has_key(app):
self.load_app(app)
return CommunityAddons.plugins[app]
except Exception as e:
pass
def get_instance_by_content_type(self, ct):
try:
return CommunityAddons.content_types[ct]
except KeyError:
return None
def get_id_by_content_type(self, ct):
return self.id_by_content_type.get(ct, None)
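# --- Sketch: the plugin naming convention load_app() relies on ----------------
# Each add-on lives in zorna_plugins/<app>/zorna_community.py and must define a
# class named "<app>_community" (a ZornaCommunityAddons subclass), mirroring the
# built-ins above (zornapoll_community, zornaevent_community, ...). The module
# and class names below are hypothetical examples of that convention.
def _example_load_plugin(app='acme'):
    module_name = 'zorna_plugins.%s.zorna_community' % app
    __import__(module_name)                       # e.g. zorna_plugins.acme.zorna_community
    module = sys.modules[module_name]
    return getattr(module, '%s_community' % app)  # e.g. the acme_community class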
@login_required()
def admin_list_communities(request):
if request.user.is_superuser:
object_id = request.GET.get('object_id', None)
if object_id:
com = Community.objects.get(pk=object_id)
cal = get_community_calendar(com)
ACLPermission.objects.copy_permissions(
com, 'member', cal, 'viewer')
ACLPermission.objects.copy_permissions(
com, 'manage', cal, 'manager')
ob_list = Community.objects.all()
extra_context = {}
extra_context['communities_list'] = ob_list
context = RequestContext(request)
return render_to_response('communities/admin_list_communities.html', extra_context, context_instance=context)
else:
return HttpResponseForbidden()
@login_required()
def admin_add_community(request):
if request.user.is_superuser:
if request.method == 'POST':
form = CommunityAdminAddForm(request.POST)
if form.is_valid():
com = form.save(commit=False)
com.owner = com.modifier = request.user
com.save()
get_community_calendar(com)
return HttpResponseRedirect(reverse('admin_list_communities'))
else:
form = CommunityAdminAddForm(request.POST)
else:
form = CommunityAdminAddForm()
context = RequestContext(request)
extra_context = {'form': form, 'curcommunity': False}
return render_to_response('communities/edit_community.html', extra_context, context_instance=context)
else:
return HttpResponseForbidden()
@login_required()
def admin_edit_community(request, community):
if request.user.is_superuser:
community = Community.objects.get(pk=community)
if request.method == 'POST':
form = CommunityAdminAddForm(request.POST, instance=community)
if form.is_valid():
com = form.save(commit=False)
com.modifier = request.user
com.save()
cal = get_community_calendar(com)
cal.rename(com.name)
return HttpResponseRedirect(reverse('admin_list_communities'))
else:
form = CommunityAdminAddForm(request.POST, instance=community)
else:
form = CommunityAdminAddForm(instance=community)
context = RequestContext(request)
extra_context = {'form': form, 'curcommunity': community}
return render_to_response('communities/edit_community.html', extra_context, context_instance=context)
else:
return HttpResponseForbidden()
@login_required()
def join_community(request, comname, community):
try:
community = Community.objects.get(pk=community)
if community.status == 1: # Public
check = get_acl_for_model(community)
check.add_perm(
'member', community, request.user, defines.ZORNA_PERMISSIONS_ALLOW)
return HttpResponseRedirect(reverse('communities_home_page') + '?community_id=' + str(community.pk))
else:
return HttpResponseRedirect(reverse('list_communities'))
except Exception as e:
return HttpResponseRedirect(reverse('list_communities'))
@login_required()
def leave_community(request, comname, community):
try:
community = Community.objects.get(pk=community)
if community.status == 1: # Public
check = get_acl_for_model(community)
check.add_perm(
'member', community, request.user, defines.ZORNA_PERMISSIONS_DENY)
return HttpResponseRedirect(reverse('list_communities'))
else:
return HttpResponseRedirect(reverse('list_communities'))
except Exception as e:
return HttpResponseRedirect(reverse('list_communities'))
@login_required()
def list_communities_view(request, queryset, extra_context, nb_page=NB_COMMUNITIES_BY_PAGE):
try:
extra_context[
'sidebar_right_template'] = "community_sidebar_right_template.html"
loader.get_template(extra_context['sidebar_right_template'])
except TemplateDoesNotExist:
extra_context[
'sidebar_right_template'] = "communities/community_sidebar_right_template.html"
extra_context['communities_count'] = Community.objects.filter(
status__in=[0, 1]).count()
extra_context['allowed_communities'] = get_communities_ids(request.user)
return object_list(request, queryset=queryset, extra_context=extra_context, template_name='communities/list_communities.html', paginate_by=nb_page)
@login_required()
def list_communities(request):
extra_context = {}
extra_context['aselected'] = 'selected'
extra_context['allowed_communities'] = get_communities_ids(request.user)
ob_list = Community.objects.filter(status__in=[0, 1])
for o in ob_list:
o.members_count = len(get_acl_by_object(o, 'member'))
o.managers_count = len(get_acl_by_object(o, 'manage'))
return list_communities_view(request, ob_list, extra_context)
@login_required()
def user_list_communities(request):
extra_context = {}
extra_context['bselected'] = 'selected'
extra_context['allowed_communities'] = get_communities_ids(request.user)
ob_list = Community.objects.filter(
pk__in=extra_context['allowed_communities'])
for o in ob_list:
o.members_count = len(get_acl_by_object(o, 'member'))
o.managers_count = len(get_acl_by_object(o, 'manage'))
return list_communities_view(request, ob_list, extra_context)
@login_required()
def last_activity_communities(request):
extra_context = {}
extra_context['cselected'] = 'selected'
extra_context['allowed_communities'] = get_communities_ids(request.user)
messages = MessageCommunity.objects.select_related().filter(
Q(users=request.user) |
Q(communities__in=extra_context['allowed_communities'], reply__isnull=True))
messages = messages.order_by('-time_updated')
return list_communities_view(request, messages, extra_context, 30)
@login_required()
def community_members(request, community):
extra_context = {}
extra_context['community'] = Community.objects.get(pk=community)
check = get_acl_for_model(extra_context['community'])
if check.manage_community(extra_context['community'], request.user):
extra_context['users_title'] = _(u"Members")
users_list = get_acl_by_object(extra_context['community'], 'member')
template = "communities/community_users.html"
extra_context['next'] = request.REQUEST.get('next', reverse(
'communities_home_page') + '?community_id=' + community)
return object_list(request, queryset=users_list, template_name=template, extra_context=extra_context, paginate_by=20)
else:
return HttpResponseForbidden()
@login_required()
def community_managers(request, community):
extra_context = {}
extra_context['community'] = Community.objects.get(pk=community)
check = get_acl_for_model(extra_context['community'])
if check.manage_community(extra_context['community'], request.user):
extra_context['users_title'] = _(u"Managers")
users_list = get_acl_by_object(extra_context['community'], 'manage')
template = "communities/community_users.html"
extra_context['next'] = request.REQUEST.get('next', reverse(
'communities_home_page') + '?community_id=' + community)
return object_list(request, queryset=users_list, template_name=template, extra_context=extra_context, paginate_by=20)
else:
return HttpResponseForbidden()
@login_required()
def manage_community_members(request, community):
extra_context = {}
extra_context['community'] = Community.objects.get(pk=community)
check = get_acl_for_model(extra_context['community'])
if check.manage_community(extra_context['community'], request.user):
extra_context['users_title'] = _(u"Members of ")
if request.method == 'POST':
selected = request.POST.getlist('_selected_action')
# delete permission
ACLPermission.objects.delete_user_permissions(
'member', extra_context['community'])
# add permission for checked users
ol = User.objects.filter(pk__in=selected)
for u in ol:
check.add_perm('member', extra_context[
'community'], u, defines.ZORNA_PERMISSIONS_ALLOW)
u = request.POST.get("u", "")
if u:
u = User.objects.get(pk=u)
check.add_perm('member', extra_context[
'community'], u, defines.ZORNA_PERMISSIONS_ALLOW)
users_list = get_acl_users_by_object(
extra_context['community'], 'member')
extra_context['next'] = request.REQUEST.get('next', reverse(
'communities_home_page') + '?community_id=' + community)
template = "communities/manage_community_users.html"
return object_list(request, queryset=users_list, template_name=template, extra_context=extra_context, paginate_by=20)
else:
return HttpResponseForbidden()
@login_required()
def manage_community_managers(request, community):
extra_context = {}
extra_context['community'] = Community.objects.get(pk=community)
check = get_acl_for_model(extra_context['community'])
if check.manage_community(extra_context['community'], request.user):
extra_context['users_title'] = _(u"Managers of ")
if request.method == 'POST':
selected = request.POST.getlist('_selected_action')
# delete permission
ACLPermission.objects.delete_user_permissions(
'manage', extra_context['community'])
# add permission for checked users
ol = User.objects.filter(pk__in=selected)
for u in ol:
check.add_perm('manage', extra_context[
'community'], u, defines.ZORNA_PERMISSIONS_ALLOW)
u = request.POST.get("u", "")
if u:
u = User.objects.get(pk=u)
check.add_perm('manage', extra_context[
'community'], u, defines.ZORNA_PERMISSIONS_ALLOW)
users_list = get_acl_users_by_object(
extra_context['community'], 'manage')
extra_context['next'] = request.REQUEST.get('next', reverse(
'communities_home_page') + '?community_id=' + community)
template = "communities/manage_community_users.html"
return object_list(request, queryset=users_list, template_name=template, extra_context=extra_context, paginate_by=20)
else:
return HttpResponseForbidden()
@login_required()
def communities_home_page(request):
    if not user_has_access_to_communities(request.user):
return HttpResponseForbidden()
# first process form submission
msg_type = request.REQUEST.get("msg-type", 'message_tab')
html_form = user_send_message(request, msg_type)
# then get messages to display
ret = get_messages(request)
ret['html_form'] = html_form
ret['tab'] = msg_type
if ret['all_msg'] == 'followed':
ret['current_menu'] = "followed"
ret['zorna_title_page'] = _(u"Followed posts")
elif ret['all_msg'] == 'tome':
ret['current_menu'] = "tome"
ret['zorna_title_page'] = _(u"Direct to me")
elif ret['all_msg'] == 'last':
ret['current_menu'] = "last"
ret['zorna_title_page'] = _(u"Recent messages")
else:
ret['current_menu'] = "messages"
ret['zorna_title_page'] = _(u"Messages")
return communities_home(request, ret)
@login_required()
def communities_home_files(request):
    if not user_has_access_to_communities(request.user):
return HttpResponseForbidden()
ret = initialize_context(request)
ret['current_menu'] = "files"
ret['zorna_title_page'] = _(u"Attachments")
q = ret['search_string']
messages = get_all_messages(request, int(ret['community_id']))
if ret['from_id'] != 0:
messages = messages.filter(owner=ret['from_id'], reply__isnull=True)
messages = messages.order_by('-time_updated')
ret['attachments'] = []
puc = get_upload_communities()
for msg in messages:
path = "%s/%s" % (puc, msg.pk)
try:
files = []
for f in os.listdir(path):
                if q and q not in f:
continue
files.append({'name': f, 'ext': os.path.splitext(f)[1][1:]})
if len(files):
if not ret['users_avatars'].has_key(msg.owner_id):
try:
ret['users_avatars'][msg.owner_id] = UserAvatar.objects.get(
user__id=msg.owner_id)
except UserAvatar.DoesNotExist:
ret['users_avatars'][msg.owner_id] = None
avatar_user = ret['users_avatars'][msg.owner_id]
msg.avatar_user = avatar_user
ret['attachments'].append({'message': msg, 'files': files})
except:
pass
paginator = Paginator(ret['attachments'], NB_MESSAGES_BY_PAGE)
page = int(request.GET.get('page', 1))
try:
ret['attachments'] = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
ret['attachments'] = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
ret['attachments'] = paginator.page(paginator.num_pages)
ret['page'] = page
ret['paginator'] = paginator
return communities_home(request, ret, 'communities/home_files.html')
@login_required()
def communities_home_members(request):
    if not user_has_access_to_communities(request.user):
return HttpResponseForbidden()
ret = initialize_context(request)
ret['current_menu'] = "members"
ret['zorna_title_page'] = _(u"Members")
try:
form = FormsForm.objects.get(
slug=settings.ZORNA_COMMUNITY_USER_PROFILE_FORM)
entries = form.entries.filter(account__in=ret['members'])
except:
entries = None
kwargs = {}
profiles = {}
kwargs['q'] = ret['search_string'].decode('utf8')
ret['page'] = request.GET.get('page', "")
ret['com_members'] = []
index = 0
for x in ret['members']:
full_name = x.get_full_name().lower()
if kwargs['q'] and kwargs['q'].lower() not in full_name:
continue
if ret['page'] and not full_name.startswith(ret['page']):
continue
if not ret['users_avatars'].has_key(x.pk):
try:
ret['users_avatars'][
x.pk] = UserAvatar.objects.get(user__id=x.pk)
except UserAvatar.DoesNotExist:
ret['users_avatars'][x.pk] = None
x.avatar_user = ret['users_avatars'][x.pk]
ret['com_members'].append(x)
def entry_sort(entry):
return entry.get_full_name()
ret['com_members'].sort(key=entry_sort)
return communities_home(request, ret, 'communities/home_members.html')
@login_required()
def communities_home_plugin(request, id):
if not user_has_access_to_communities(request.user):
return HttpResponseForbidden()
ret = initialize_context(request)
ret['current_menu'] = id
cp = get_community_addons_instance(request, ret['community_id'])
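    # Plugin menu ids are expected to look like '<plugin-name>_<page>_menu';
    # any other id leaves 'instance' unset (None).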
try:
r = id.split('_')
if len(r) == 3 and r[-1] == 'menu':
instance = cp.get_instance_by_name(r[0])
else:
instance = None
except:
instance = None
if instance:
ret['plugin_list'] = instance.render_page(request, r[-2], ret)
ret['plugin_id'] = cp.get_tab_id(instance, r[-2])
for entry in ret['plugin_list']:
if not ret['users_avatars'].has_key(entry['message'].owner_id):
try:
ret['users_avatars'][entry['message'].owner_id] = UserAvatar.objects.get(
user__id=entry['message'].owner_id)
except UserAvatar.DoesNotExist:
ret['users_avatars'][entry['message'].owner_id] = None
entry['avatar_user'] = ret[
'users_avatars'][entry['message'].owner_id]
paginator = Paginator(ret['plugin_list'], NB_MESSAGES_BY_PAGE)
page = int(request.GET.get('page', 1))
try:
ret['plugin_list'] = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
ret['plugin_list'] = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of
# results.
ret['plugin_list'] = paginator.page(paginator.num_pages)
ret['page'] = page
ret['paginator'] = paginator
ret['css_file'] = instance.get_css_file(ret['plugin_id'])
ret['zorna_title_page'] = instance.get_page_title(request, r[-2])
return communities_home(request, ret, 'communities/home_plugin.html')
@login_required()
def communities_edit_plugin(request, id, instance_id):
if not user_has_access_to_communities(request.user):
return HttpResponseForbidden()
cp = get_community_addons_instance(request, 0, False)
try:
r = id.split('_')
if len(r) == 5 and r[-1] == 'tab':
instance = cp.get_instance_by_name(r[0])
else:
instance = None
except:
instance = None
if instance:
if request.method == 'POST':
form = instance.get_form(
request, r[-2], post=True, instance_id=instance_id)
if form.is_valid():
instance.save(request, r[-2], form, None)
return HttpResponse('')
else:
form = instance.get_form(request, r[-2], instance_id=instance_id)
extra_context = {}
extra_context['form'] = form
extra_context['form_extra'] = instance.render_form(request, form)
extra_context['action'] = reverse(
'communities_edit_plugin', args=[id, instance_id])
context = RequestContext(request)
return render_to_response('communities/edit_plugin.html', extra_context, context_instance=context)
else:
return CommunityPopupHttpResponseError(request, _(u'Access Denied'))
@login_required()
def communities_home(request, ret, template='communities/home.html'):
try:
avatar_user = UserAvatar.objects.get(user=request.user)
except UserAvatar.DoesNotExist:
avatar_user = None
ret['avatar'] = avatar_user
cp = get_community_addons_instance(request, ret['community_id'])
ret['tabs'] = [{'id': 'message_tab', 'title': _(u'Message')}, ]
ret['tabs'].extend(cp.plugins_tabs)
ret['plugins_menus'] = cp.plugins_menus
ret['plugins'] = []
for id, instance in cp.tabs.iteritems():
r = id.split('_')
title, html = instance.render_widget(
request, r[-2], ret['community_id'])
if title:
ret['plugins'].append({
'title': title, 'html': html, 'contenttype_id': instance.get_content_type_id(r[-2])})
ret['jsfollow'] = _(u'Follow')
ret['jsunfollow'] = _(u'Unfollow')
context = RequestContext(request)
return render_to_response(template, ret, context_instance=context)
def get_tab(request, tab):
# form.tab_title = _(u"Message")
ret = {}
data = []
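    # 'data' collects [label, value] pairs for the send-to field:
    # "g-<id>" values for whole communities and "u-<id>" values for single users.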
community_id = request.GET.get("community_id", 0)
try:
cp = get_community_addons_instance(request, 0, True)
r = tab.split('_')
if len(r) == 5 and r[-1] == 'tab':
instance = cp.get_instance_by_name(r[0])
ao = instance.get_communities(request, r[-2])
if community_id and int(community_id) in ao:
ao = [community_id]
communities = Community.objects.filter(id__in=ao).order_by('name')
members = instance.get_users(request, r[-2])
for com in communities:
data.append([com.name, "g-%s" % str(com.id)])
data.extend([("%s %s" % (x.last_name, x.first_name), ("u-%s" % str(x.id)))
for x in members])
else:
instance = None
ao = set([])
ao_member = get_allowed_objects(request.user, Community, 'member')
ao = ao.union(set(ao_member))
ao_manage = get_allowed_objects(request.user, Community, 'manage')
ao = ao.union(set(ao_manage))
if community_id and int(community_id) in ao:
ao = [community_id]
communities = Community.objects.filter(id__in=ao).order_by('name')
members = set([])
for com in communities:
members = members.union(set(get_acl_by_object(com, 'member')))
members = members.union(set(get_acl_by_object(com, 'manage')))
data.append([com.name, "g-%s" % str(com.id)])
data.extend([("%s %s" % (x.last_name, x.first_name), ("u-%s" % str(x.id)))
for x in members])
if request.method == 'POST':
mcf = MessageCommunityForm(
request.POST or None, request.FILES or None)
if instance:
form_extra = instance.get_form(request, r[-2], True)
else:
form_extra = None
            if mcf.is_valid() and (form_extra is None or form_extra.is_valid()):
m = mcf.save(request)
if m:
if form_extra:
fx = instance.save(request, r[-2], form_extra, m)
MessageCommunityExtra.objects.create(
message=m, content_object=fx)
# initialize form and display only current tab
mcf = MessageCommunityForm()
if instance:
form_extra = instance.get_form(request, r[-2])
else:
form_extra = None
else:
mcf.data['send_to'] = ''
else:
mcf = MessageCommunityForm()
if instance:
form_extra = instance.get_form(request, r[-2])
else:
form_extra = None
if form_extra:
form_extra = instance.render_form(request, form_extra)
else:
form_extra = None
mcf.message_type = tab
t = loader.get_template("communities/message_form.html")
c = RequestContext(request, {
'message_form': mcf, 'form_extra': form_extra})
ret['error'] = False
ret['html'] = t.render(c)
ret['sendto'] = data
except Exception as e:
ret['error'] = True
ret['html'] = "Error: Can't load form (%s)" % e
ret['sendto'] = []
return ret
def get_json_tab(request, tab):
ret = get_tab(request, tab)
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
def user_send_message(request, msg_type):
ret = get_tab(request, msg_type)
return ret['html']
@login_required()
def user_send_reply(request):
message = request.GET.get("reply_message", None)
msg_id = request.GET.get("msg_id", 0)
ret = {}
ret['html'] = ''
ret['msgid'] = 0
if msg_id and message:
try:
r = MessageCommunity.objects.get(pk=msg_id)
m = MessageCommunity(message=message)
m.owner = m.modifier = request.user
m.reply = r
m.save()
m.manager = True
# update parent time message
r.time_updated = m.time_updated
r.modifier = request.user
r.save()
# current_community = request.POST.get("current_community", 0)
t = loader.get_template('communities/message_reply.html')
c = RequestContext(request, {"reply": m})
ret['html'] = t.render(c)
ret['msgid'] = m.pk
# send email to followers
followers = r.followers.all().exclude(pk=request.user.pk)
            subject = _(u'New reply from %s') % request.user.get_full_name()
url = request.build_absolute_uri(reverse(
'communities_home_page', args=[]))
email = ZornaEmail()
for f in followers:
ec = {"follower": f, "message": r, 'reply': m, 'url': url}
body_text = render_to_string(
'communities/email_follower_text.html', ec)
body_html = render_to_string(
'communities/email_follower_html.html', ec)
email.append(
subject, body_text, body_html, settings.DEFAULT_FROM_EMAIL, [f.email])
email.send()
except MessageCommunity.DoesNotExist:
pass
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
@login_required()
def user_update_reply(request):
ret = {}
msg_id = request.GET.get("msgid", None)
message = request.GET.get("value", '')
ret['msgid'] = msg_id
ret['message'] = message
try:
r = MessageCommunity.objects.get(pk=msg_id)
ret['error'] = False
except MessageCommunity.DoesNotExist:
ret['error'] = True
r = None
if message and r:
r.message = message
r.modifier = request.user
r.save()
try:
extra = MessageCommunityExtra.objects.get(message=r)
extra.content_object.update_message(r)
except:
pass
elif r:
ret['message'] = r.message
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
@login_required()
def user_delete_reply(request):
ret = {}
msg_id = request.GET.get("msgid", None)
ret['msgid'] = msg_id
try:
MessageCommunity.objects.get(pk=msg_id).delete()
ret['error'] = False
except MessageCommunity.DoesNotExist:
ret['error'] = True
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
def delete_message_attachments(message_id):
try:
path = os.path.join(get_upload_communities(), "%s" % message_id)
files = os.listdir(path)
for f in files:
fullpath = os.path.join(path, f)
if os.path.isfile(fullpath):
os.remove(fullpath)
os.rmdir(path)
return True
except Exception:
return False
@login_required()
def user_delete_message(request):
ret = {}
msg_id = request.GET.get("msgid", None)
ret['msgid'] = msg_id
try:
MessageCommunity.objects.filter(reply=msg_id).delete()
r = MessageCommunity.objects.get(pk=msg_id)
try:
extra = MessageCommunityExtra.objects.get(message=r)
extra.content_object.delete()
except:
pass
r.delete()
delete_message_attachments(msg_id)
ret['error'] = False
except MessageCommunity.DoesNotExist:
ret['error'] = True
r = None
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
def get_new_messages(request, community_id, max_msg_id, search_string, all_msg, member_id, message_id, ct_id):
allowed_objects = get_allowed_objects(
request.user, Community, ['manage', 'member'])
if all_msg == 'followed':
messages = MessageCommunity.objects.select_related().filter(
followers=request.user)
elif all_msg == 'last':
from_date = request.user.get_profile().last_activity
messages = MessageCommunity.objects.select_related().filter(Q(
time_updated__gt=from_date) | Q(reply__time_updated__gt=from_date))
elif all_msg == 'tome':
messages = MessageCommunity.objects.select_related().filter(
users=request.user)
elif all_msg == 'contributor':
member = User.objects.get(pk=member_id)
messages = MessageCommunity.objects.select_related().filter(Q(communities__in=allowed_objects) & (
Q(owner=member, reply__isnull=True) | Q(messagecommunity__owner=member)))
elif all_msg == 'message':
messages = MessageCommunity.objects.select_related().filter(
pk=message_id)
elif all_msg == 'contenttype':
messages = MessageCommunity.objects.select_related().filter(
contenttype=ct_id)
elif community_id:
if int(community_id) in allowed_objects:
messages = MessageCommunity.objects.select_related().filter(
Q(communities__exact=community_id))
else:
messages = MessageCommunity.objects.select_related().filter(Q(communities__in=allowed_objects) | Q(
owner=request.user) | Q(users=request.user) | Q(reply__owner=request.user))
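    # If neither a community id nor a recognised 'all_msg' value selected a
    # queryset above, 'messages' is never bound; the NameError handler below
    # turns that case into an empty queryset.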
try:
messages = messages.filter((Q(pk__gt=int(max_msg_id)) & Q(
reply__isnull=True)) | Q(messagecommunity__pk__gt=int(max_msg_id)))
except NameError:
return MessageCommunity.objects.none()
return messages
def get_messages(request):
max_msg_id = request.REQUEST.get("msg_max_id", 0)
ret = initialize_context(request)
messages = None
if int(max_msg_id):
messages = get_new_messages(request, int(ret['community_id']), max_msg_id, ret[
'search_string'], ret['all_msg'], ret['member_id'], ret['message_id'], ret['contenttype_id'])
elif ret['all_msg'] == 'all':
messages = get_all_messages(request, int(ret['community_id']))
elif ret['all_msg'] == 'followed':
messages = get_followed_messages(request)
elif ret['all_msg'] == 'last':
messages = get_all_messages(request, int(ret[
'community_id']), request.user.get_profile().last_activity)
elif ret['all_msg'] == 'tome':
messages = get_tome_messages(request)
elif ret['all_msg'] == 'contributor':
messages = get_contributor_messages(request, ret['member_id'])
elif ret['all_msg'] == 'message':
messages = get_message_by_id(request, ret['message_id'])
elif ret['all_msg'] == 'contenttype':
messages = get_messages_by_content_type(request, ret['contenttype_id'])
else:
messages = MessageCommunity.objects.none()
# if max_msg_id == 0 and ret['last_msg_id']:
# messages = messages.filter(pk__lt=int(ret['last_msg_id']))
if ret['search_string'] != '':
messages = messages.filter(message__icontains=ret['search_string'])
if ret['from_id'] != 0:
messages = messages.filter(owner=ret['from_id'], reply__isnull=True)
messages = messages.annotate(nb_replies=Count('messagecommunity'))
messages = messages.order_by('-time_updated')
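    # On the initial load (max_msg_id == 0) only one page of messages is
    # returned; 'messages_more' tells the client whether another page exists.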
if max_msg_id == 0:
page = int(ret['com_page'])
messages = messages[page * NB_MESSAGES_BY_PAGE:(
page + 1) * NB_MESSAGES_BY_PAGE]
ret['com_page'] = page + 1
c = messages.count()
if c:
ret['com_page'] = page + 1
ret['messages_more'] = False if c < NB_MESSAGES_BY_PAGE else True
t = loader.get_template('communities/message_entry.html')
extra_context = ret.copy()
html = []
extra_context['msg'] = ''
extra_context['current_query_string'] = ret['current_query_string']
# retrieve msg followed by user
if request.user.is_anonymous():
msg_followed = MessageCommunity.objects.none()
else:
msg_followed = MessageCommunity.objects.filter(followers=request.user)
upload_path = get_upload_communities()
cp = get_community_addons_instance(request, int(ret['community_id']))
for msg in messages:
if msg.nb_replies:
replies = MessageCommunity.objects.select_related().filter(
reply__isnull=False, reply=msg).order_by('time_updated')
msg.replies = replies
msg.nb_followers = msg.followers.count()
if ret['users_avatars'].has_key(msg.owner_id):
extra_context['avatar_user'] = ret['users_avatars'][msg.owner_id]
else:
try:
ret['users_avatars'][msg.owner_id] = UserAvatar.objects.get(
user__id=msg.owner_id)
except UserAvatar.DoesNotExist:
ret['users_avatars'][msg.owner_id] = None
extra_context['avatar_user'] = ret['users_avatars'][msg.owner_id]
path = "%s/%s" % (upload_path, msg.pk)
try:
extra_context['attachments'] = [{'file': f, 'ext': os.path.splitext(
f)[1][1:]} for f in os.listdir(path)]
except:
extra_context['attachments'] = []
try:
extra = MessageCommunityExtra.objects.get(message=msg)
# msg.extra = extra.content_object
instance = cp.get_instance_by_content_type(extra.content_type_id)
if instance:
msg.extra = instance.render_message(
request, extra.content_object)
msg.extra_id = cp.get_id_by_content_type(extra.content_type_id)
msg.extra_object_id = extra.object_id
else:
msg.extra = None
except Exception as e:
msg.extra = None
extra_context['msg'] = msg
c = RequestContext(request, extra_context)
# ret['last_msg_id'] = max(msg.pk, ret['last_msg_id'])
if msg in msg_followed:
msg.follow = True
else:
msg.follow = False
msg.manager = is_manager_by_message(request.user, msg.pk)
html.append({'id': msg.pk, 'msg': t.render(c)})
ret['html_messages'] = html
return ret
def check_messages_ajax(request):
ret = get_messages(request)
data = {}
data['html_messages'] = ret['html_messages']
if ret.has_key('messages_more'):
data['com_page'] = ret['com_page']
data['messages_more'] = ret['messages_more']
json_data = simplejson.dumps(data)
return HttpResponse(json_data)
@login_required()
def get_file(request, msg, filename):
baccess = is_member_by_message(
request.user, msg) or is_manager_by_message(request.user, msg)
if baccess:
upload_path = get_upload_communities()
path = "%s/%s" % (upload_path, msg)
for f in os.listdir(path):
if f == filename:
path = "%s/%s" % (path, f)
fp = open(path, 'rb')
content_type = mimetypes.guess_type(f)[0]
response = HttpResponse(fp.read(), content_type=content_type)
response['Content-Length'] = os.path.getsize(path)
                response['Content-Disposition'] = 'attachment; filename="%s"' % f
return response
else:
return HttpResponseForbidden()
def follow_message_ajax(request):
ret = {}
message = request.GET.get("message", None)
if message and (is_member_by_message(request.user, message) or is_manager_by_message(request.user, message)):
msg = MessageCommunity.objects.get(pk=message)
msg.followers.add(request.user)
ret['msgid'] = message
ret['follow'] = True
ret['error'] = False
else:
ret['follow'] = False
ret['error'] = True
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
def unfollow_message_ajax(request):
ret = {}
message = request.GET.get("message", None)
baccess = is_member_by_message(
request.user, message) or is_manager_by_message(request.user, message)
if baccess:
msg = MessageCommunity.objects.get(pk=message)
msg.followers.remove(request.user)
ret['msgid'] = message
ret['follow'] = False
ret['error'] = False
else:
ret['follow'] = True
ret['error'] = True
json_data = simplejson.dumps(ret)
return HttpResponse(json_data)
def member_profile(request, member):
# TODO permission
extra_context = {}
try:
avatar_user = UserAvatar.objects.get(user__id=member)
except UserAvatar.DoesNotExist:
avatar_user = None
extra_context['avatar_user'] = avatar_user
extra_context['member_id'] = member
context = RequestContext(request)
return render_to_response('communities/member_profile.html', extra_context, context_instance=context)
def invite_community_member(request, community_id):
extra_context = {}
extra_context['community'] = Community.objects.get(pk=community_id)
check = get_acl_for_model(extra_context['community'])
if check.manage_community(extra_context['community'], request.user):
form = InviteCommunityForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
users = form.cleaned_data['send_to'].split(',')
check = get_acl_for_model(extra_context['community'])
ol = User.objects.filter(pk__in=users)
for u in ol:
if form.cleaned_data['manager']:
check.add_perm('manage', extra_context[
'community'], u, defines.ZORNA_PERMISSIONS_ALLOW)
else:
check.add_perm('member', extra_context[
'community'], u, defines.ZORNA_PERMISSIONS_ALLOW)
# TODO send an email to all recipients
subject = _(u'%(user)s added you to %(community)s') % {
'user': request.user.get_full_name(), 'community': extra_context['community'].name}
url = request.build_absolute_uri(reverse(
'communities_home_page', args=[]))
if form.cleaned_data['manager']:
role = _("manager")
else:
role = _("member")
email = ZornaEmail()
for f in ol:
ec = {
"member": f,
"message": form.cleaned_data['message'],
'community': extra_context['community'],
'url': url,
'role': role,
'user': request.user
}
body_text = render_to_string(
'communities/email_invite_text.html', ec)
body_html = render_to_string(
'communities/email_invite_html.html', ec)
email.append(
subject, body_text, body_html, settings.DEFAULT_FROM_EMAIL, [f.email])
email.send()
return HttpResponse('')
extra_context['form'] = form
extra_context['action'] = reverse(
'invite_community_member', args=[community_id])
context = RequestContext(request)
return render_to_response('communities/community_invite_member.html', extra_context, context_instance=context)
else:
return CommunityPopupHttpResponseError(request, _(u'Access Denied'))
def invite_list_users(request, community_id):
users = set([])
ret = {}
try:
com = Community.objects.get(pk=community_id)
if is_user_community_manager(request.user, com.pk):
users = users.union(set(get_acl_by_object(com, 'member')))
users = users.union(set(get_acl_by_object(com, 'manage')))
ret['data'] = [("%s %s" % (x.last_name, x.first_name), ("%s" % str(x.id)))
for x in User.objects.all().exclude(pk__in=[u.pk for u in users])]
except Community.DoesNotExist:
pass
data = simplejson.dumps(ret)
return HttpResponse(data)
def community_dashboard(request, community_id=None):
communities_ids = get_communities_ids(request.user)
if not len(communities_ids):
return HttpResponseRedirect('/')
if community_id and int(community_id) in communities_ids:
current_community = int(community_id)
else:
current_community = request.session.get('user_current_community', 0)
        if not current_community or current_community not in communities_ids:
current_community = communities_ids[0]
request.session['user_current_community'] = current_community
t = loader.select_template(['community_dashboard_%s.html' % current_community,'community_dashboard.html','communities/community_dashboard.html'])
try:
avatar_user = UserAvatar.objects.get(user=request.user)
except UserAvatar.DoesNotExist:
avatar_user = None
communities = Community.objects.filter(pk__in=communities_ids)
community = None
for com in communities:
com.calendar = get_community_calendar(com)
if current_community == com.pk:
community = com
if not community:
return HttpResponseRedirect('/')
ctx = { 'community': community,
'avatar': avatar_user,
'communities': communities,
}
c = RequestContext(request, ctx)
return HttpResponse(t.render(c))
|
|
# Copyright (C) 2012 Google, Inc.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""unit testing code for webkitpy."""
import logging
import multiprocessing
import optparse
import os
import StringIO
import sys
import time
import traceback
import unittest
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.test.finder import Finder
from webkitpy.test.printer import Printer
from webkitpy.test.runner import Runner, unit_test_name
_log = logging.getLogger(__name__)
up = os.path.dirname
webkit_root = up(up(up(up(up(os.path.abspath(__file__))))))
def main():
filesystem = FileSystem()
wkf = WebKitFinder(filesystem)
tester = Tester(filesystem, wkf)
tester.add_tree(wkf.path_from_webkit_base('Tools', 'Scripts'), 'webkitpy')
tester.skip(('webkitpy.common.checkout.scm.scm_unittest',), 'are really, really, slow', 31818)
if sys.platform == 'win32':
tester.skip(('webkitpy.common.checkout', 'webkitpy.common.config', 'webkitpy.tool', 'webkitpy.w3c', 'webkitpy.layout_tests.layout_package.bot_test_expectations'), 'fail horribly on win32', 54526)
# This only needs to run on Unix, so don't worry about win32 for now.
appengine_sdk_path = '/usr/local/google_appengine'
if os.path.exists(appengine_sdk_path):
if not appengine_sdk_path in sys.path:
sys.path.append(appengine_sdk_path)
import dev_appserver
from google.appengine.dist import use_library
use_library('django', '1.2')
dev_appserver.fix_sys_path()
tester.add_tree(wkf.path_from_webkit_base('Tools', 'TestResultServer'))
else:
_log.info('Skipping TestResultServer tests; the Google AppEngine Python SDK is not installed.')
return not tester.run()
class Tester(object):
def __init__(self, filesystem=None, webkit_finder=None):
self.filesystem = filesystem or FileSystem()
self.finder = Finder(self.filesystem)
self.printer = Printer(sys.stderr)
self.webkit_finder = webkit_finder or WebKitFinder(self.filesystem)
self._options = None
def add_tree(self, top_directory, starting_subdirectory=None):
self.finder.add_tree(top_directory, starting_subdirectory)
def skip(self, names, reason, bugid):
self.finder.skip(names, reason, bugid)
def _parse_args(self, argv=None):
parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
parser.add_option('-a', '--all', action='store_true', default=False,
help='run all the tests')
parser.add_option('-c', '--coverage', action='store_true', default=False,
help='generate code coverage info')
parser.add_option('-i', '--integration-tests', action='store_true', default=False,
                          help='run integration tests as well as unit tests')
parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
help='number of tests to run in parallel (default=%default)')
parser.add_option('-p', '--pass-through', action='store_true', default=False,
help='be debugger friendly by passing captured output through to the system')
parser.add_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)')
parser.add_option('-t', '--timing', action='store_true', default=False,
help='display per-test execution time (implies --verbose)')
parser.add_option('-v', '--verbose', action='count', default=0,
help='verbose output (specify once for individual test results, twice for debug messages)')
parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
'If no args are given, all the tests will be run.')
return parser.parse_args(argv)
def run(self):
self._options, args = self._parse_args()
self.printer.configure(self._options)
self.finder.clean_trees()
names = self.finder.find_names(args, self._options.all)
if not names:
_log.error('No tests to run')
return False
return self._run_tests(names)
def _run_tests(self, names):
# Make sure PYTHONPATH is set up properly.
sys.path = self.finder.additional_paths(sys.path) + sys.path
# FIXME: unittest2 and coverage need to be in sys.path for their internal imports to work.
thirdparty_path = self.webkit_finder.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'thirdparty')
if not thirdparty_path in sys.path:
sys.path.append(thirdparty_path)
if self._options.coverage:
_log.warning("Checking code coverage, so running things serially")
self._options.child_processes = 1
import coverage
cov = coverage.coverage(omit=["/usr/*", "*/webkitpy/thirdparty/*", "/Library/*"])
cov.start()
self.printer.write_update("Checking imports ...")
if not self._check_imports(names):
return False
self.printer.write_update("Finding the individual test methods ...")
loader = _Loader()
parallel_tests, serial_tests = self._test_names(loader, names)
self.printer.write_update("Running the tests ...")
self.printer.num_tests = len(parallel_tests) + len(serial_tests)
start = time.time()
test_runner = Runner(self.printer, loader, self.webkit_finder)
test_runner.run(parallel_tests, self._options.child_processes)
test_runner.run(serial_tests, 1)
self.printer.print_result(time.time() - start)
if self._options.coverage:
cov.stop()
cov.save()
cov.report(show_missing=False)
return not self.printer.num_errors and not self.printer.num_failures
def _check_imports(self, names):
for name in names:
if self.finder.is_module(name):
# if we failed to load a name and it looks like a module,
# try importing it directly, because loadTestsFromName()
# produces lousy error messages for bad modules.
try:
__import__(name)
except ImportError:
_log.fatal('Failed to import %s:' % name)
self._log_exception()
return False
return True
def _test_names(self, loader, names):
parallel_test_method_prefixes = ['test_']
serial_test_method_prefixes = ['serial_test_']
if self._options.integration_tests:
parallel_test_method_prefixes.append('integration_test_')
serial_test_method_prefixes.append('serial_integration_test_')
parallel_tests = []
loader.test_method_prefixes = parallel_test_method_prefixes
for name in names:
parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
serial_tests = []
loader.test_method_prefixes = serial_test_method_prefixes
for name in names:
serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
# loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
# if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
# tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
serial_tests = list(set(serial_tests).difference(set(parallel_tests)))
return (parallel_tests, serial_tests)
def _all_test_names(self, suite):
names = []
if hasattr(suite, '_tests'):
for t in suite._tests:
names.extend(self._all_test_names(t))
else:
names.append(unit_test_name(suite))
return names
def _log_exception(self):
s = StringIO.StringIO()
traceback.print_exc(file=s)
for l in s.buflist:
_log.error(' ' + l.rstrip())
class _Loader(unittest.TestLoader):
test_method_prefixes = []
def getTestCaseNames(self, testCaseClass):
def isTestMethod(attrname, testCaseClass=testCaseClass):
if not hasattr(getattr(testCaseClass, attrname), '__call__'):
return False
return (any(attrname.startswith(prefix) for prefix in self.test_method_prefixes))
testFnNames = filter(isTestMethod, dir(testCaseClass))
testFnNames.sort()
return testFnNames
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import os
import uuid
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (Cluster-mode)."""
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
def __init__(self, *args, **kwargs):
super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_cluster_opts)
def do_setup(self, context):
"""Do the customized set up on client for cluster mode."""
super(NetAppCmodeNfsDriver, self).do_setup(context)
na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
self.vserver = self.configuration.netapp_vserver
self.zapi_client = client_cmode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vserver=self.vserver)
self.ssc_enabled = True
self.ssc_vols = None
self.stale_vols = set()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
ssc_cmode.check_ssc_api_permissions(self.zapi_client)
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
        LOG.debug('create_volume on %s', volume['host'])
self._ensure_shares_mounted()
# get share as pool name
share = volume_utils.extract_host(volume['host'], level='pool')
if share is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
# warn on obsolete extra specs
na_utils.log_extra_spec_warnings(extra_specs)
try:
volume['provider_location'] = share
            LOG.info(_LI('casted to %s'), volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, share,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
LOG.error(_LW("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
volume['provider_location'] = None
finally:
if self.ssc_enabled:
self._update_stale_vols(self._get_vol_for_share(share))
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
target_path = '%s' % (volume['name'])
export_path = share.split(':')[1]
flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
export_path)
self.zapi_client.file_assign_qos(flex_vol_name,
qos_policy_group,
target_path)
def _check_volume_type(self, volume, share, file_name):
"""Match volume type for share file."""
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if not self._is_share_vol_type_match(volume, share):
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Volume type does not match for share %s."),
share))
if qos_policy_group:
try:
vserver, flex_vol_name = self._get_vserver_and_exp_vol(
share=share)
self.zapi_client.file_assign_qos(flex_vol_name,
qos_policy_group,
file_name)
except na_api.NaApiError as ex:
LOG.exception(_LE('Setting file QoS policy group failed. %s'),
ex)
raise exception.NetAppDriverException(
                    reason=_('Setting file QoS policy group failed. %s') % ex)
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume on NetApp Cluster."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
vserver)
share = share if share else self._get_provider_location(volume_id)
self._post_prov_deprov_in_ssc(share)
def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
"""Gets the vserver and export volume for share."""
(host_ip, export_path) = self._get_export_ip_path(volume_id, share)
ifs = self.zapi_client.get_if_info_by_ip(host_ip)
vserver = ifs[0].get_child_content('vserver')
exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
export_path)
return vserver, exp_volume
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
        sync = self.ssc_vols is None
ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.connection,
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_NFS_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats()
self._spawn_clean_cache_job()
self.zapi_client.provide_ems(self, netapp_backend, self._app_version)
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
for nfs_share in self._mounted_shares:
capacity = self._get_share_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool.update(capacity)
# add SSC content if available
vol = self._get_vol_for_share(nfs_share)
if vol and self.ssc_vols:
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(
not thin).lower()
pools.append(pool)
return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
if volume:
self.stale_vols.add(volume)
set_copy = self.stale_vols.copy()
if reset:
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
LOG.warning(_LW("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
vs_ifs = self.zapi_client.get_vserver_ips(self.vserver)
for vol in vols['all']:
for sh in self._mounted_shares:
host = sh.split(':')[0]
junction = sh.split(':')[1]
ip = na_utils.resolve_hostname(host)
if (self._ip_in_ifs(ip, vs_ifs) and
junction == vol.id['junction_path']):
mnt_share_vols.add(vol)
vol.export['path'] = sh
break
for key in vols.keys():
vols[key] = vols[key] & mnt_share_vols
self.ssc_vols = vols
def _ip_in_ifs(self, ip, api_ifs):
"""Checks if ip is listed for ifs in API format."""
if api_ifs is None:
return False
for ifc in api_ifs:
ifc_ip = ifc.get_child_content("address")
if ifc_ip == ip:
return True
return False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(vserver, exp_volume) = self._get_vserver_and_exp_vol(
volume_id=None, share=share)
for file in old_files:
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self.zapi_client.get_file_usage(path, vserver)
file_list.append((file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
ip_vserver = self._get_vserver_for_ip(ip)
if ip_vserver and shares:
for share in shares:
ip_sh = share.split(':')[0]
sh_vserver = self._get_vserver_for_ip(ip_sh)
if sh_vserver == ip_vserver:
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _get_vserver_for_ip(self, ip):
"""Get vserver for the mentioned ip."""
try:
ifs = self.zapi_client.get_if_info_by_ip(ip)
vserver = ifs[0].get_child_content('vserver')
return vserver
except Exception:
return None
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
if self.ssc_vols:
for vol in self.ssc_vols['all']:
if vol.export['path'] == nfs_share:
return vol
return None
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
compatible = self._is_share_eligible(share, volume['size'])
if compatible and self.ssc_enabled:
matched = self._is_share_vol_type_match(volume, share)
compatible = compatible and matched
return compatible
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
def delete_volume(self, volume):
"""Deletes a logical volume."""
share = volume['provider_location']
super(NetAppCmodeNfsDriver, self).delete_volume(volume)
self._post_prov_deprov_in_ssc(share)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
share = self._get_provider_location(snapshot.volume_id)
super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
self._post_prov_deprov_in_ssc(share)
def _post_prov_deprov_in_ssc(self, share):
if self.ssc_enabled and share:
netapp_vol = self._get_vol_for_share(share)
if netapp_vol:
self._update_stale_vols(volume=netapp_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
copy_success = False
try:
major, minor = self.zapi_client.get_ontapi_version()
col_path = self.configuration.netapp_copyoffload_tool_path
if major == 1 and minor >= 20 and col_path:
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
'copy offload workflow.')
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
except Exception as e:
LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
finally:
if not copy_success:
super(NetAppCmodeNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
if self.ssc_enabled:
sh = self._get_provider_location(volume['id'])
self._update_stale_vols(self._get_vol_for_share(sh))
def _try_copyoffload(self, context, volume, image_service, image_id):
"""Tries server side file copy offload."""
copied = False
cache_result = self._find_image_in_cache(image_id)
if cache_result:
copied = self._copy_from_cache(volume, image_id, cache_result)
if not cache_result or not copied:
self._copy_from_img_service(context, volume, image_service,
image_id)
def _get_ip_verify_on_cluster(self, host):
"""Verifies if host on same cluster and returns ip."""
ip = na_utils.resolve_hostname(host)
vserver = self._get_vserver_for_ip(ip)
if not vserver:
raise exception.NotFound(_("Unable to locate an SVM that is "
"managing the IP address '%s'") % ip)
return ip
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file_name."""
LOG.debug("Trying copy from cache using copy offload.")
copied = False
for res in cache_result:
try:
(share, file_name) = res
LOG.debug("Found cache file_name on share %s.", share)
if share != self._get_provider_location(volume['id']):
col_path = self.configuration.netapp_copyoffload_tool_path
src_ip = self._get_ip_verify_on_cluster(
share.split(':')[0])
src_path = os.path.join(share.split(':')[1], file_name)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
dst_path = os.path.join(
self._get_export_path(volume['id']), volume['name'])
# Always run copy offload as regular user, it's sufficient
# and rootwrap doesn't allow copy offload to run as root
# anyways.
self._execute(col_path, src_ip, dst_ip,
src_path, dst_path,
run_as_root=False,
check_exit_code=0)
self._register_image_in_cache(volume, image_id)
LOG.debug("Copied image from cache to volume %s using"
" copy offload.", volume['id'])
else:
self._clone_file_dst_exists(share, file_name,
volume['name'],
dest_exists=True)
LOG.debug("Copied image from cache to volume %s using"
" cloning.", volume['id'])
self._post_clone_image(volume)
copied = True
break
except Exception as e:
LOG.exception(_LE('Error in workflow copy from cache. %s.'), e)
return copied
def _clone_file_dst_exists(self, share, src_name, dst_name,
dest_exists=False):
"""Clone file even if dest exists."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
dest_exists=dest_exists)
def _copy_from_img_service(self, context, volume, image_service,
image_id):
"""Copies from the image service using copy offload."""
LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
locations = self._construct_image_nfs_url(image_loc)
src_ip = None
selected_loc = None
# this will match the first location that has a valid IP on cluster
for location in locations:
conn, dr = self._check_get_nfs_path_segs(location)
if conn:
try:
src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
selected_loc = location
break
                except exception.NotFound:
pass
if src_ip is None:
raise exception.NotFound(_("Source host details not found."))
(__, ___, img_file) = selected_loc.rpartition('/')
src_path = os.path.join(dr, img_file)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
# tmp file is required to deal with img formats
tmp_img_file = six.text_type(uuid.uuid4())
col_path = self.configuration.netapp_copyoffload_tool_path
img_info = image_service.show(context, image_id)
dst_share = self._get_provider_location(volume['id'])
self._check_share_can_hold_size(dst_share, img_info['size'])
run_as_root = self._execute_as_root
dst_dir = self._get_mount_point_for_share(dst_share)
dst_img_local = os.path.join(dst_dir, tmp_img_file)
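        # Two paths below: if the image sits on a different share than the
        # volume, pull it over with the copy offload tool; otherwise clone it
        # within the share. Non-raw images are converted to raw locally before
        # being cloned into the volume, and the raw copy is kept as the
        # img-cache-<image_id> entry for later reuse.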
try:
# If src and dst share not equal
if (('%s:%s' % (src_ip, dr)) !=
('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
dst_img_serv_path = os.path.join(
self._get_export_path(volume['id']), tmp_img_file)
# Always run copy offload as regular user, it's sufficient
# and rootwrap doesn't allow copy offload to run as root
# anyways.
self._execute(col_path, src_ip, dst_ip, src_path,
dst_img_serv_path, run_as_root=False,
check_exit_code=0)
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
% {'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % image_id)
if img_info['disk_format'] == 'raw':
LOG.debug('Image is raw %s.', image_id)
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = six.text_type(uuid.uuid4())
dst_img_conv_local = os.path.join(dst_dir, img_conv)
# Checking against image size which is approximate check
self._check_share_can_hold_size(dst_share, img_info['size'])
try:
image_utils.convert_image(dst_img_local,
dst_img_conv_local, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst_img_conv_local,
run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but format is now %s.")
% data.file_format)
else:
self._clone_file_dst_exists(dst_share, img_conv,
volume['name'],
dest_exists=True)
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug('Copied locally converted raw image'
' %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
self._delete_file(dst_img_conv_local)
self._post_clone_image(volume)
finally:
if os.path.exists(dst_img_local):
self._delete_file(dst_img_local)
|
|
###############################################################################
# read_clusterdata.py: module to read APOGEE data on globular clusters
###############################################################################
import sys
import numpy
import apogee.tools.read as apread
from apogee.tools import bitmask
import os
try:
from apogee.spec import continuum
except RuntimeError:
print('Failed to load continuum')
import astropy.io.ascii
_COMBINED_INDEX=1
_GCS= ['M15','M92','M53','N5466','M13','M2','M3','M5','M107','M71']
_ERASESTR= " "
def read_meszarosgcdata(filename=os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','clusterdata','aj509073t2_mrt.txt')):
"""
NAME:
read_meszarosgcdata
PURPOSE:
Read the data on globular clusters from Meszaros et al. (2015)
INPUT:
filename= Name of the file that has the ApJ machine-readable table
OUTPUT:
data structure with the data
HISTORY:
2015-02-11 - Started - Bovy (IAS@KITP)
2015-08-13 - Re-written for new data format - Bovy (UofT)
"""
data= astropy.io.ascii.read(filename)
data.rename_column('Clust','CLUSTER')
data.rename_column('Teff','TEFF')
data.rename_column('log(g)','LOGG')
data.rename_column('[Fe/H]','FEH')
data.rename_column('2MASS','ID')
# Now match to allStar to get the location_ids and H magnitudes
alldata= apread.allStar(raw=True)
locids= numpy.zeros(len(data),dtype='int')-1
hmags= numpy.zeros(len(data),dtype='float')-1
# and match to allVisit for the fibers that each star was observed in
allvdata= apread.allVisit(raw=True)
fibers= numpy.zeros((len(data),numpy.nanmax(alldata['NVISITS'])),
dtype='int')-1
for ii in range(len(data)):
if 'Pleiades' in data['CLUSTER'][ii]: continue
indx= alldata['APOGEE_ID'] == data['ID'][ii]
if numpy.sum(indx) == 0:
raise ValueError('allStar match for %s not found ...' % (data['ID'][ii]))
if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
            raise ValueError('Multiple matches found for %s ...' % (data['ID'][ii]))
locids[ii]= alldata['LOCATION_ID'][indx][0]
hmags[ii]= alldata['H'][indx][0]
for jj in range(alldata['NVISITS'][indx][0]):
fibers[ii,jj]= allvdata[alldata['VISIT_PK'][indx][0,jj]]['FIBERID']
data['LOCATION_ID']= locids
data['H']= hmags
data['FIBERID']= fibers
data['APOGEE_ID'] = data['ID']
data['FE_H'] = data['FEH']
return data
def read_caldata(filename=os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','clusterdata','aj485195t4_mrt.txt'),dr='12'):
"""
NAME:
read_caldata
PURPOSE:
Read the data on calibration clusters from Meszaros et al. (2013)
INPUT:
filename= Name of the file that has the ApJ machine-readable table
OUTPUT:
data structure with the data
HISTORY:
2015-02-11 - Written - Bovy (IAS@KITP)
"""
data= astropy.io.ascii.read(filename)
data.rename_column('Cluster','CLUSTER')
data.remove_column('Teff')
data.rename_column('TeffC','TEFF')
data.remove_column('logg')
data.rename_column('loggC','LOGG')
data.remove_column('[M/H]')
data.rename_column('[M/H]C','FEH')
data.rename_column('2MASS','ID')
# Now match to allStar to get the location_ids
alldata= apread.allStar(raw=True)
locids= numpy.zeros(len(data),dtype='int')-1
hmags= numpy.zeros(len(data),dtype='float')-1
snrs = numpy.zeros(len(data),dtype='float')-1
ras= numpy.zeros(len(data),dtype='float')-1
decs= numpy.zeros(len(data),dtype='float')-1
# and match to allVisit for the fibers that each star was observed in
allvdata= apread.allVisit(raw=True)
fibers= numpy.zeros((len(data),numpy.nanmax(alldata['NVISITS'])),
dtype='int')-1
inds = []
for ii in range(len(data)):
if 'Pleiades' in data['CLUSTER'][ii]:
inds.append(0)
continue
indx= alldata['APOGEE_ID'] == data['ID'][ii]
        success = numpy.where(indx)[0]
if success.size==0 or success.size>1:
inds.append(0)
elif success.size==1:
inds.append(success[0])
if numpy.sum(indx) == 0:
raise ValueError('allStar match for %s not found ...' % (data['ID'][ii]))
if len(list(set(alldata['LOCATION_ID'][indx]))) > 1:
            raise ValueError('Multiple matches found for %s ...' % (data['ID'][ii]))
locids[ii]= alldata['LOCATION_ID'][indx][0]
hmags[ii]= alldata['H'][indx][0]
snrs[ii] = alldata['SNR'][indx][0]
ras[ii] = alldata['RA'][indx][0]
decs[ii] = alldata['DEC'][indx][0]
for jj in range(alldata['NVISITS'][indx][0]):
fibers[ii,jj]= allvdata[alldata['VISIT_PK'][indx][0,jj]]['FIBERID']
inds = (numpy.array(inds),)
data['LOCATION_ID']= locids
data['H']= hmags
data['FIBERID']= fibers
data['SNR'] = snrs
data['APOGEE_ID'] = data['ID']
data['RA'] = ras
data['DEC'] = decs
data['index'] = inds[0]
data['M_H'] = data['FEH']
data['FE_H'] = alldata['FE_H'][inds]
    if dr == '13':
        rel = 'FE'
    else:
        rel = 'H'
data['C_{0}'.format(rel)] = alldata['C_{0}'.format(rel)][inds]
data['N_{0}'.format(rel)] = alldata['N_{0}'.format(rel)][inds]
data['O_{0}'.format(rel)] = alldata['O_{0}'.format(rel)][inds]
data['NA_{0}'.format(rel)] = alldata['NA_{0}'.format(rel)][inds]
data['MG_{0}'.format(rel)] = alldata['MG_{0}'.format(rel)][inds]
data['AL_{0}'.format(rel)] = alldata['AL_{0}'.format(rel)][inds]
data['SI_{0}'.format(rel)] = alldata['SI_{0}'.format(rel)][inds]
data['S_{0}'.format(rel)] = alldata['S_{0}'.format(rel)][inds]
data['K_{0}'.format(rel)] = alldata['K_{0}'.format(rel)][inds]
data['CA_{0}'.format(rel)] = alldata['CA_{0}'.format(rel)][inds]
data['TI_{0}'.format(rel)] = alldata['TI_{0}'.format(rel)][inds]
data['V_{0}'.format(rel)] = alldata['V_{0}'.format(rel)][inds]
data['MN_{0}'.format(rel)] = alldata['MN_{0}'.format(rel)][inds]
data['NI_{0}'.format(rel)] = alldata['NI_{0}'.format(rel)][inds]
return numpy.array(data)
def read_spectra(cluster,teffmin=4000.,teffmax=5000.,cont_type='cannon',
cont_deg=4):
"""
NAME:
read_spectra
PURPOSE:
Read the APOGEE spectra and their errors for stars in a given cluster
INPUT:
cluster - Name of the cluster (name in one of the data files)
teffmin= (4000.) minimum temperature
teffmax= (5000.) maximum temperature
cont_type = ('cannon') type of continuum normalization to perform
cont_deg= (4) degree polynomial to fit for continuum normalization
OUTPUT:
(data, spec, specerr) - (full data structure, spectra [nspec,nlam], spectral uncertainties [nspec,nlam]) nlam=7214 on ASPCAP grid
HISTORY:
2015-08-13 - Written based on some older code - Bovy (UofT)
"""
if cluster.upper() in _GCS:
data= read_meszarosgcdata()
else:
data= read_caldata()
# Cut to just this cluster and temperature range
if 'rc' in cluster.lower():
# Only for NGC 6819
rc= True
cluster= cluster[:-2]
else:
rc= False
data= data[data['CLUSTER'] == cluster.upper()]
data= data[(data['TEFF'] < teffmax)\
*(data['TEFF'] > teffmin)]
if cluster.lower() == 'n6819':
g4CN= good4CN(cluster,data)
g4CN[10]= False # another one, by hand!
if rc:
            data= data[~g4CN] # Just those!
else:
data= data[g4CN] # Just those!
# Load all the spectra
nspec= len(data)
spec= numpy.zeros((nspec,7214))
specerr= numpy.zeros((nspec,7214))
# Setup bad pixel mask
badcombpixmask= bitmask.badpixmask()\
+2**bitmask.apogee_pixmask_int("SIG_SKYLINE")
for ii in range(nspec):
sys.stdout.write('\r'+"Loading spectrum %i / %i ...\r" % (ii+1,nspec))
sys.stdout.flush()
spec[ii]= apread.apStar(data['LOCATION_ID'][ii],
data['ID'][ii],
ext=1,header=False,
aspcapWavegrid=True)[_COMBINED_INDEX]
specerr[ii]= apread.apStar(data['LOCATION_ID'][ii],
data['ID'][ii],
ext=2,header=False,
aspcapWavegrid=True)[_COMBINED_INDEX]
# Inflate uncertainties for bad pixels
mask= apread.apStar(data['LOCATION_ID'][ii],
data['ID'][ii],
ext=3,header=False,
aspcapWavegrid=True)[_COMBINED_INDEX]
specerr[ii,(mask & (badcombpixmask)) != 0]+=\
            100.*numpy.mean(spec[ii,~numpy.isnan(spec[ii])])
# Also inflate pixels with high SNR to 0.5%
highsnr= spec[ii]/specerr[ii] > 200.
specerr[ii,highsnr]= 0.005*numpy.fabs(spec[ii,highsnr])
# Continuum-normalize
cont= continuum.fit(spec[ii],specerr[ii],type=cont_type,deg=cont_deg)
spec[ii]/= cont
specerr[ii]/= cont
specerr[ii,highsnr]= 0.005 # like standard APOGEE reduction
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
return (data,spec,specerr)
def good4CN(cluster,data):
"""
NAME:
good4CN
PURPOSE:
return the indices of stars that can be used to determine the spread in C/N
INPUT:
cluster - the cluster name
data - the data for this cluster
OUTPUT:
index
HISTORY:
2015-09-04 - Written - Bovy (UofT)
"""
if cluster.lower() == 'm67':
indx= (data['LOGG'] > _m67rccut(data['TEFF']))\
*(numpy.fabs(data['TEFF']-4600.)>3.)
elif cluster.lower() == 'n6819':
apokasc= apread.apokasc()
ma= numpy.zeros(len(data),dtype='int')-1
for ii in range(len(data)):
try:
ma[ii]= (numpy.arange(len(apokasc))[apokasc['APOGEE_ID'] == data['ID'][ii]])[0]
except IndexError: pass
indx= numpy.ones(len(data),dtype='bool')
for ii in range(len(data)):
if ma[ii] >= 0 \
and apokasc[ma[ii]]['SEISMO EVOL'] == 'CLUMP ':
indx[ii]= False
# Also the clump stars' friends, they are all suspicious
indx[numpy.fabs(data['TEFF']-4765.) < 60.]= False
else:
indx= numpy.ones(len(data),dtype='bool')
return indx
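# Linear cut in logg as a function of Teff used by good4CN for M67
# (the 'rc' in the name suggests it separates out red-clump stars)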
def _m67rccut(t):
return (3.4-1.7)/(4950.-4365)*(t-4950)+3.35
|
|
import json
from .test_base import Base, BaseTestCase
from .test_fixtures import Account
from .resource import CollectionResource, SingleResource
class AccountCollectionResource(CollectionResource):
model = Account
default_sort = ['id']
class AccountResource(SingleResource):
model = Account
class GetOnlyAccountCollectionResource(CollectionResource):
model = Account
methods = ['GET']
class GetOnlyAccountResource(SingleResource):
model = Account
methods = ['GET']
class PostOnlyAccountCollectionResource(CollectionResource):
model = Account
methods = ['POST']
class PostOnlyAccountResource(SingleResource):
model = Account
methods = ['POST']
class PutOnlyAccountCollectionResource(CollectionResource):
model = Account
methods = ['PUT']
class PutOnlyAccountResource(SingleResource):
model = Account
methods = ['PUT']
class PatchOnlyAccountCollectionResource(CollectionResource):
model = Account
methods = ['PATCH']
class PatchOnlyAccountResource(SingleResource):
model = Account
methods = ['PATCH']
class DeleteOnlyAccountCollectionResource(CollectionResource):
model = Account
methods = ['DELETE']
class DeleteOnlyAccountResource(SingleResource):
model = Account
methods = ['DELETE']
class MethodTest(BaseTestCase):
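    # Checks that each resource accepts only the HTTP methods listed in its
    # `methods` attribute and rejects everything else as Method Not Allowed.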
def create_test_resources(self):
self.app.add_route('/accounts', AccountCollectionResource(self.db_engine))
self.app.add_route('/accounts/{id}', AccountResource(self.db_engine))
self.app.add_route('/getonly-accounts', GetOnlyAccountCollectionResource(self.db_engine))
self.app.add_route('/getonly-accounts/{id}', GetOnlyAccountResource(self.db_engine))
self.app.add_route('/postonly-accounts', PostOnlyAccountCollectionResource(self.db_engine))
self.app.add_route('/postonly-accounts/{id}', PostOnlyAccountResource(self.db_engine))
self.app.add_route('/putonly-accounts', PutOnlyAccountCollectionResource(self.db_engine))
self.app.add_route('/putonly-accounts/{id}', PutOnlyAccountResource(self.db_engine))
self.app.add_route('/patchonly-accounts', PatchOnlyAccountCollectionResource(self.db_engine))
self.app.add_route('/patchonly-accounts/{id}', PatchOnlyAccountResource(self.db_engine))
self.app.add_route('/deleteonly-accounts', DeleteOnlyAccountCollectionResource(self.db_engine))
self.app.add_route('/deleteonly-accounts/{id}', DeleteOnlyAccountResource(self.db_engine))
def test_get(self):
response, = self.simulate_request('/accounts', method='POST', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 1, 'name': 'Bob', 'owner': None}]})
response, = self.simulate_request('/accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': {'id': 1, 'name': 'Bob', 'owner': None}})
response, = self.simulate_request('/getonly-accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 1, 'name': 'Bob', 'owner': None}]})
response, = self.simulate_request('/getonly-accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': {'id': 1, 'name': 'Bob', 'owner': None}})
response = self.simulate_request('/postonly-accounts', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertMethodNotAllowed(response)
def test_post(self):
response, = self.simulate_request('/accounts', method='POST', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response = self.simulate_request('/accounts/1', method='POST', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/getonly-accounts', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/getonly-accounts/1', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response, = self.simulate_request('/postonly-accounts', method='POST', body=json.dumps({'id': 2, 'name': 'Jim'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response = self.simulate_request('/postonly-accounts/1', method='POST', body=json.dumps({'id': 2, 'name': 'Jim'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts/1', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts/1', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts/1', method='POST', body=json.dumps({'id': 3, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 1, 'name': 'Bob', 'owner': None}, {'id': 2, 'name': 'Jim', 'owner': None}]})
response, = self.simulate_request('/accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': {'id': 1, 'name': 'Bob', 'owner': None}})
def test_put(self):
response, = self.simulate_request('/accounts', method='POST', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response = self.simulate_request('/accounts', method='PUT', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/accounts/1', method='PUT', body=json.dumps({'id': 1, 'name': 'Jim'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response, = self.simulate_request('/accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': {'id': 1, 'name': 'Jim', 'owner': None}})
response = self.simulate_request('/getonly-accounts', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/getonly-accounts/1', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts/1', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts/1', method='PUT', body=json.dumps({'id': 1, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response = self.simulate_request('/patchonly-accounts', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts/1', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts/1', method='PUT', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response, = self.simulate_request('/accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': {'id': 1, 'name': 'Jack', 'owner': None}})
def test_patch(self):
response, = self.simulate_request('/accounts', method='POST', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response = self.simulate_request('/accounts', method='PATCH', body=json.dumps({'patches': [{'op': 'add', 'path': '/', 'value': {'id': 2, 'name': 'Jane'}}]}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 1, 'name': 'Bob', 'owner': None}, {'id': 2, 'name': 'Jane', 'owner': None}]})
response = self.simulate_request('/accounts/1', method='PATCH', body=json.dumps({'id': 1, 'name': 'Jim'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response, = self.simulate_request('/accounts/1', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': {'id': 1, 'name': 'Jim', 'owner': None}})
response = self.simulate_request('/getonly-accounts', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/getonly-accounts/1', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts/1', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts/1', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts', method='PATCH', body=json.dumps({'patches': [{'op': 'add', 'path': '/', 'value': {'id': 3, 'name': 'Dan'}}]}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response = self.simulate_request('/patchonly-accounts/1', method='PATCH', body=json.dumps({'id': 1, 'name': 'Jack'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response = self.simulate_request('/deleteonly-accounts', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts/1', method='PATCH', body=json.dumps({'id': 1, 'name': 'Alice'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 1, 'name': 'Jack', 'owner': None}, {'id': 2, 'name': 'Jane', 'owner': None}, {'id': 3, 'name': 'Dan', 'owner': None}]})
def test_delete(self):
response, = self.simulate_request('/accounts', method='POST', body=json.dumps({'id': 1, 'name': 'Bob'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response, = self.simulate_request('/accounts', method='POST', body=json.dumps({'id': 2, 'name': 'Jim'}), headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertCreated(response)
response = self.simulate_request('/accounts', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/accounts/1', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 2, 'name': 'Jim', 'owner': None}]})
response = self.simulate_request('/getonly-accounts', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/getonly-accounts/2', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/postonly-accounts/2', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/putonly-accounts/2', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/patchonly-accounts/2', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response = self.simulate_request('/deleteonly-accounts', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertMethodNotAllowed(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': [{'id': 2, 'name': 'Jim', 'owner': None}]})
response = self.simulate_request('/deleteonly-accounts/2', method='DELETE', headers={'Accept': 'application/json', 'Content-Type': 'application/json'})
self.assertOK(response)
response, = self.simulate_request('/accounts', method='GET', headers={'Accept': 'application/json'})
self.assertOK(response, {'data': []})
|
|
from main import Node
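# Stub factory functions: each simply returns a bare Node(). The names mirror
# common compositing (Nuke-style) node types.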
def DeepHoldout():
return Node()
def ClipTest():
return Node()
def BlackOutside():
return Node()
def Denoise():
return Node()
def Laplacian():
return Node()
def SphericalTransform():
return Node()
def Sharpen():
return Node()
def OCIOCDLTransform():
return Node()
def TimeBlur():
return Node()
def ChannelMerge():
return Node()
def Histogram():
return Node()
def DisplaceGeo():
return Node()
def ParticlePointForce():
return Node()
def ColorTransfer():
return Node()
def HueShift():
return Node()
def add32p():
return Node()
def CopyBBox():
return Node()
def DiskCache():
return Node()
def OneView():
return Node()
def Keyer():
return Node()
def DegrainSimple():
return Node()
def RotoPaint():
return Node()
def MotionBlur():
return Node()
def ScannedGrain():
return Node()
def CheckerBoard():
return Node()
def NoOp():
return Node()
def Text():
return Node()
def FrameBlend():
return Node()
def MotionBlur2D():
return Node()
def MergeMat():
return Node()
def VolumeRays():
return Node()
def STMap():
return Node()
def FrameHold():
return Node()
def Invert():
return Node()
def ParticleMerge():
return Node()
def ParticleGravity():
return Node()
def Camera():
return Node()
def FrameRange():
return Node()
def CMSTestPattern():
return Node()
def LensDistortion():
return Node()
def DropShadow():
return Node()
def PointCloudGenerator():
return Node()
def ZDefocus():
return Node()
def ColorLookup():
return Node()
def DeepRead():
return Node()
def ReadGeo():
return Node()
def LevelSet():
return Node()
def Grid():
return Node()
def DeepTransform():
return Node()
def UVProject():
return Node()
def ApplyMaterial():
return Node()
def ProceduralNoise():
return Node()
def Merge2():
return Node()
def Light():
return Node()
def Camera2():
return Node()
def Convolve():
return Node()
def ParticleEmitter():
return Node()
def MergeGeo():
return Node()
def Dither():
return Node()
def TransformMasked():
return Node()
def Cylinder():
return Node()
def Shuffle():
return Node()
def DegrainBlue():
return Node()
def GridWarp():
return Node()
def ModelBuilder():
return Node()
def Axis():
return Node()
def LayerContactSheet():
return Node()
def Group():
return Node()
def Matrix():
return Node()
def Transform():
return Node()
def Sampler():
return Node()
def LightWrap():
return Node()
def CopyRectangle():
return Node()
def DeepRecolor():
return Node()
def Rectangle():
return Node()
def EdgeBlur():
return Node()
def GenerateLUT():
return Node()
def Defocus():
return Node()
def Phong():
return Node()
def Kronos():
return Node()
def Card():
return Node()
def WriteGeo():
return Node()
def ParticleVortex():
return Node()
def Keymix():
return Node()
def DepthToPoints():
return Node()
def Reflection():
return Node()
def Ramp():
return Node()
def Merge():
return Node()
def Switch():
return Node()
def F_Align():
return Node()
def ParticleSpawn():
return Node()
def Refraction():
return Node()
def Expression():
return Node()
def Primatte():
return Node()
def ParticleLookAt():
return Node()
def BlendMat():
return Node()
def CompareMetaData():
return Node()
def Reconcile3D():
return Node()
def RadialDistort():
return Node()
def ZMerge():
return Node()
def TimeDissolve():
return Node()
def IBKGizmo():
return Node()
def DeepMerge():
return Node()
def Emboss():
return Node()
def ColorWheel():
return Node()
def Vectorfield():
return Node()
def ParticleToGeo():
return Node()
def ReLight():
return Node()
def Ultimatte():
return Node()
def Wireframe():
return Node()
def FillMat():
return Node()
def CameraTracker():
return Node()
def Write():
return Node()
def HueKeyer():
return Node()
def HueCorrect():
return Node()
def AppendClip():
return Node()
def AddTimeCode():
return Node()
def PrmanRender():
return Node()
def remove32p():
return Node()
def ReConverge():
return Node()
def DeepColorCorrect():
return Node()
def EdgeDetect():
return Node()
def AudioRead():
return Node()
def Assert():
return Node()
def ParticleSettings():
return Node()
def MotionBlur3D():
return Node()
def CrosstalkGeo():
return Node()
def StickyNote():
return Node()
def IDistort():
return Node()
def Colorspace():
return Node()
def DirBlur():
return Node()
def Stabilize2D():
return Node()
def PositionToPoints():
return Node()
def GodRays():
return Node()
def Keylight():
return Node()
def Gamma():
return Node()
def ViewMetaData():
return Node()
def DeepFromFrames():
return Node()
def ParticleDirectionalForce():
return Node()
def PSDMerge():
return Node()
def JoinViews():
return Node()
def Displacement():
return Node()
def Read():
return Node()
def Retime():
return Node()
def EXPTool():
return Node()
def Dilate():
return Node()
def DeepFromImage():
return Node()
def Precomp():
return Node()
def DepthGenerator():
return Node()
def Truelight3():
return Node()
def Blur():
return Node()
def Card2():
return Node()
def ParticleMotionAlign():
return Node()
def TimeWarp():
return Node()
def Spot():
return Node()
def ColorCorrect():
return Node()
def DeepSample():
return Node()
def Project3D():
return Node()
def Multiply():
return Node()
def Sparkles():
return Node()
def ShuffleCopy():
return Node()
def Diffuse():
return Node()
def Glow():
return Node()
def TimeEcho():
return Node()
def BasicMaterial():
return Node()
def IBKColour():
return Node()
def Specular():
return Node()
def CopyMetaData():
return Node()
def Position():
return Node()
def MarkerRemoval():
return Node()
def Clamp():
return Node()
def Toe2():
return Node()
def OCIOLogConvert():
return Node()
def PointsTo3D():
return Node()
def OCIOColorSpace():
return Node()
def LogGeo():
return Node()
def Add():
return Node()
def Soften():
return Node()
def F_ReGrain():
return Node()
def OCIODisplay():
return Node()
def ColorMatrix():
return Node()
def Unpremult():
return Node()
def ContactSheet():
return Node()
def EditGeo():
return Node()
def CCrosstalk():
return Node()
def Input():
return Node()
def DeepExpression():
return Node()
def Copy():
return Node()
def DustBust():
return Node()
def ParticleBounce():
return Node()
def ZSlice():
return Node()
def Noise():
return Node()
def Emission():
return Node()
def MCID():
return Node()
def F_DeFlicker2():
return Node()
def ParticleWind():
return Node()
def HSVTool():
return Node()
def DeepToPoints():
return Node()
def Bilateral():
return Node()
def AddMix():
return Node()
def Flare():
return Node()
def OFlow():
return Node()
def ShuffleViews():
return Node()
def F_RigRemoval():
return Node()
def DepthToPosition():
return Node()
def CornerPin2D():
return Node()
def Roto():
return Node()
def SplineWarp():
return Node()
def Blend():
return Node()
def CurveTool():
return Node()
def RolloffContrast():
return Node()
def ScanlineRender():
return Node()
def Premult():
return Node()
def Grade():
return Node()
def PlanarTracker():
return Node()
def Log2Lin():
return Node()
def OCIOFileTransform():
return Node()
def DeepToImage():
return Node()
def Mirror():
return Node()
def HistEQ():
return Node()
def BlinkScript():
return Node()
def TemporalMedian():
return Node()
def MatchGrade():
return Node()
def Radial():
return Node()
def Glint():
return Node()
def Environment():
return Node()
def SoftClip():
return Node()
def MergeExpression():
return Node()
def Tile():
return Node()
def CameraShake():
return Node()
def Erode():
return Node()
def AdjBBox():
return Node()
def VectorGenerator():
return Node()
def Posterize():
return Node()
def Dot():
return Node()
def ModifyMetaData():
return Node()
def PostageStamp():
return Node()
def BumpBoss():
return Node()
def ColorBars():
return Node()
def ParticleExpression():
return Node()
def ParticleCache():
return Node()
def PLogLin():
return Node()
def ModifyRIB():
return Node()
def GeoSelect():
return Node()
def Anaglyph():
return Node()
def Tracker():
return Node()
def ParticleSpeedLimit():
return Node()
def VectorBlur():
return Node()
def LookupGeo():
return Node()
def MinColor():
return Node()
def Scene():
return Node()
def FilterErode():
return Node()
def TVIscale():
return Node()
def Crop():
return Node()
def Reformat():
return Node()
def MixViews():
return Node()
def Saturation():
return Node()
def BackdropNode():
return Node()
def Sphere():
return Node()
def DeepReformat():
return Node()
def PoissonMesh():
return Node()
def F_WireRemoval():
return Node()
def ParticleDrag():
return Node()
def TimeOffset():
return Node()
def TransformGeo():
return Node()
def Difference():
return Node()
def SideBySide():
return Node()
def DeepWrite():
return Node()
def Median():
return Node()
def Trilinear():
return Node()
def Remove():
return Node()
def NoTimeBlur():
return Node()
def Cube():
return Node()
def Grain():
return Node()
def Card3D():
return Node()
def Normals():
return Node()
def DeepCrop():
return Node()
def Output():
return Node()
def UVTile2():
return Node()
def Dissolve():
return Node()
def Constant():
return Node()
def Viewer():
return Node()
def Direct():
return Node()
def F_Steadiness():
return Node()
def ParticleTurbulence():
return Node()
def TimeClip():
return Node()
def ParticleCurve():
return Node()
|
|
import pycuda.autoinit
import pycuda.driver as drv
import numpy
import time
from pycuda.compiler import SourceModule
from jinja2 import Environment, PackageLoader
def main():
numpy.set_printoptions(precision=4,
threshold=10000,
linewidth=150)
#Set up global timer
tot_time = time.time()
#Define constants
BankSize = 8 # Do not go beyond 8!
WarpSize = 32 #Do not change...
DimGridX = 19
DimGridY = 19
BlockDimX = 1#256
BlockDimY = 1#256
SearchSpaceSize = 2**24 #BlockDimX * BlockDimY * 32
FitnessValDim = BlockDimX*BlockDimY*WarpSize #SearchSpaceSize
GenomeDim = BlockDimX*BlockDimY*WarpSize #SearchSpaceSize
AlignedByteLengthGenome = 4
print "Total number of genomes:", GenomeDim
#Create dictionary argument for rendering
RenderArgs= {"safe_memory_mapping":1,
"aligned_byte_length_genome":AlignedByteLengthGenome,
"bit_length_edge_type":3,
"curand_nr_threads_per_block":32,
"nr_tile_types":2,
"nr_edge_types":8,
"warpsize":WarpSize,
"fit_dim_thread_x":32*BankSize,
"fit_dim_thread_y":1,
"fit_dim_block_x":BlockDimX,
"fit_dim_grid_x":19,
"fit_dim_grid_y":19,
"fit_nr_four_permutations":24,
"fit_length_movelist":244,
"fit_nr_redundancy_grid_depth":2,
"fit_nr_redundancy_assemblies":10,
"fit_tile_index_starting_tile":0,
"glob_nr_tile_orientations":4,
"banksize":BankSize,
"curand_dim_block_x":BlockDimX
}
# Set environment for template package Jinja2
env = Environment(loader=PackageLoader('main', './'))
# Load source code from file
Source = env.get_template('./alpha.cu') #Template( file(KernelFile).read() )
# Render source code
RenderedSource = Source.render( RenderArgs )
# Save rendered source code to file
f = open('./rendered.cu', 'w')
f.write(RenderedSource)
f.close()
#Load source code into module
KernelSourceModule = SourceModule(RenderedSource, options=None, arch="compute_20", code="sm_20")
Kernel = KernelSourceModule.get_function("SearchSpaceKernel")
CurandKernel = KernelSourceModule.get_function("CurandInitKernel")
#Initialise InteractionMatrix
InteractionMatrix = numpy.zeros( ( 8, 8) ).astype(numpy.float32)
def Delta(a,b):
if a==b:
return 1
else:
return 0
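    # Build a symmetric interaction matrix in which edge types pair up as
    # (1,2), (3,4), (5,6); types 0 and 7 interact with nothing.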
for i in range(InteractionMatrix.shape[0]):
for j in range(InteractionMatrix.shape[1]):
InteractionMatrix[i][j] = ( 1 - i % 2 ) * Delta( i, j+1 ) + ( i % 2 ) * Delta( i, j-1 )
#Set up our InteractionMatrix
InteractionMatrix_h = KernelSourceModule.get_texref("t_ucInteractionMatrix")
drv.matrix_to_texref( InteractionMatrix, InteractionMatrix_h , order="C")
print InteractionMatrix
#Set-up genomes
#dest = numpy.arange(GenomeDim*4).astype(numpy.uint8)
#for i in range(0, GenomeDim/4):
#dest[i*8 + 0] = int('0b00100101',2) #CRASHES
#dest[i*8 + 1] = int('0b00010000',2) #CRASHES
#dest[i*8 + 0] = int('0b00101000',2)
#dest[i*8 + 1] = int('0b00000000',2)
#dest[i*8 + 2] = int('0b00000000',2)
#dest[i*8 + 3] = int('0b00000000',2)
#dest[i*8 + 4] = int('0b00000000',2)
#dest[i*8 + 5] = int('0b00000000',2)
#dest[i*8 + 6] = int('0b00000000',2)
#dest[i*8 + 7] = int('0b00000000',2)
# dest[i*4 + 0] = 40
# dest[i*4 + 1] = 0
# dest[i*4 + 2] = 0
# dest[i*4 + 3] = 0
dest_h = drv.mem_alloc(GenomeDim*AlignedByteLengthGenome) #dest.nbytes)
dest = drv.pagelocked_zeros((GenomeDim*AlignedByteLengthGenome), numpy.uint8, "C", 0)
#drv.memcpy_htod(dest_h, dest)
#print "Genomes before: "
#print dest
#Set-up grids
#grids = numpy.zeros((10000, DimGridX, DimGridY)).astype(numpy.uint8) #TEST
#grids_h = drv.mem_alloc(GenomeDim*DimGridX*DimGridY) #TEST
#drv.memcpy_htod(grids_h, grids)
#print "Grids:"
#print grids
#Set-up fitness values
#fitness = numpy.zeros(FitnessValDim).astype(numpy.float32)
#fitness_h = drv.mem_alloc(fitness.nbytes)
#fitness_size = numpy.zeros(FitnessValDim).astype(numpy.uint32)
fitness_size = drv.pagelocked_zeros((FitnessValDim), numpy.uint32, "C", 0)
fitness_size_h = drv.mem_alloc(fitness_size.nbytes)
#fitness_hash = numpy.zeros(FitnessValDim).astype(numpy.uint32)
fitness_hash = drv.pagelocked_zeros((FitnessValDim), numpy.uint32, "C", 0)
fitness_hash_h = drv.mem_alloc(fitness_hash.nbytes)
#drv.memcpy_htod(fitness_h, fitness)
#print "Fitness values:"
#print fitness
#Set-up grids
#grids = numpy.zeros((GenomeDim, DimGridX, DimGridY)).astype(numpy.uint8) #TEST
grids = drv.pagelocked_zeros((GenomeDim, DimGridX, DimGridY), numpy.uint8, "C", 0)
grids_h = drv.mem_alloc(GenomeDim*DimGridX*DimGridY) #TEST
#drv.memcpy_htod(grids_h, grids)
#print "Grids:"
#print grids
#Set-up curand
#curand = numpy.zeros(40*GenomeDim).astype(numpy.uint8);
#curand_h = drv.mem_alloc(curand.nbytes)
curand_h = drv.mem_alloc(40*GenomeDim)
#SearchSpace control
#SearchSpaceSize = 2**24
#BlockDimY = SearchSpaceSize / (2**16)
#BlockDimX = SearchSpaceSize / (BlockDimY)
#print "SearchSpaceSize: ", SearchSpaceSize, " (", BlockDimX, ", ", BlockDimY,")"
#Schedule kernel calls
#MaxBlockDim = 100
OffsetBlocks = (SearchSpaceSize) % (BlockDimX*BlockDimY*WarpSize)
MaxBlockCycles = (SearchSpaceSize - OffsetBlocks)/(BlockDimX*BlockDimY*WarpSize)
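    # The search space is processed in MaxBlockCycles kernel launches, each covering
    # BlockDimX*BlockDimY*WarpSize genomes; OffsetBlocks is the remainder that does
    # not fill a whole launch.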
BlockCounter = 0
print "Will do that many kernels a ", BlockDimX,"x", BlockDimY,"x ", WarpSize, ":", MaxBlockCycles
#quit()
#SET UP PROCESSING
histo = {}
#INITIALISATION
CurandKernel(curand_h, block=(WarpSize,1,1), grid=(BlockDimX, BlockDimY))
print "Finished Curand kernel, starting main kernel..."
#FIRST GENERATION
proc_time = time.time()
print "Starting first generation..."
start = drv.Event()
stop = drv.Event()
start.record()
Kernel(dest_h, grids_h, fitness_size_h, fitness_hash_h, curand_h, numpy.int64(0), block=(WarpSize*BankSize,1,1), grid=(BlockDimX,BlockDimY))
stop.record()
stop.synchronize()
print "Total kernel time taken: %fs"%(start.time_till(stop)*1e-3)
print "Copying..."
drv.memcpy_dtoh(fitness_size, fitness_size_h)
drv.memcpy_dtoh(fitness_hash, fitness_hash_h)
drv.memcpy_dtoh(grids, grids_h)
drv.memcpy_dtoh(dest, dest_h)
#INTERMEDIATE GENERATION
for i in range(MaxBlockCycles-1):
print "Starting generation: ", i+1
start = drv.Event()
stop = drv.Event()
start.record()
Kernel(dest_h, grids_h, fitness_size_h, fitness_hash_h, curand_h, numpy.int64((i+1)*BlockDimX*BlockDimY*WarpSize), block=(WarpSize*BankSize,1,1), grid=(BlockDimX,BlockDimY))
print "Processing..."
for j in range(len(fitness_hash)):
# if (fitness_hash[j]!=33) and (fitness_hash[j]!=44) and (fitness_hash[j]!=22):
if fitness_hash[j] in histo:
histo[fitness_hash[j]] = (grids[j], dest[j*AlignedByteLengthGenome]+dest[j*AlignedByteLengthGenome+1]*2**8+dest[j*AlignedByteLengthGenome+2]*2**16+dest[j*AlignedByteLengthGenome+3]*2**24, histo[fitness_hash[j]][2]+1, fitness_size[j])
#(histo[fitness_hash[j]][0], histo[fitness_hash[j]][1], histo[fitness_hash[j]][2]+1, histo[fitness_hash[j]][3])
else:
histo[fitness_hash[j]] = (grids[j], dest[j*AlignedByteLengthGenome]+dest[j*AlignedByteLengthGenome+1]*2**8+dest[j*AlignedByteLengthGenome+2]*2**16+dest[j*AlignedByteLengthGenome+3]*2**24, 1, fitness_size[j])
#DEBUG
f = open("S28_tmp.s28", "w")
for j in range(len(fitness_size)):
print >>f, "Size: ", fitness_size[j], " Hash:", fitness_hash[j], " Genome:", dest[j*AlignedByteLengthGenome], "|",dest[j*AlignedByteLengthGenome+1]," | ",dest[j*AlignedByteLengthGenome+2], " | ", dest[j*AlignedByteLengthGenome+3]
print >>f, grids[j]
f.close()
quit() #DEBUG
if __name__ == '__main__':
main()
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class ExecutableNodeTest( unittest.TestCase ) :
class MyNode( Gaffer.ExecutableNode ) :
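		# Minimal ExecutableNode used by the tests below: it counts calls to
		# execute() and, when withHash is True, folds the context 'time' into
		# its execution hash.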
def __init__( self, withHash ) :
Gaffer.ExecutableNode.__init__( self )
self.__withHash = withHash
self.executionCount = 0
def execute( self, contexts ):
self.executionCount += 1
def executionRequirements( self, context ) :
return self._defaultRequirements( context )
def executionHash( self, context ) :
h = IECore.MurmurHash()
if self.__withHash :
h.append( context['time'] )
return h
def acceptsInput( self, plug, inputPlug ) :
return Gaffer.ExecutableNode._acceptsRequirementsInput( plug, inputPlug )
def testIsExecutable( self ) :
c = Gaffer.Context()
self.assertFalse( Gaffer.ExecutableNode.isExecutable( Gaffer.Despatcher ) )
self.assertFalse( Gaffer.ExecutableNode.isExecutable( Gaffer.Node ) )
self.assertFalse( Gaffer.ExecutableNode.isExecutable( c ) )
self.assertTrue( Gaffer.ExecutableNode.isExecutable( Gaffer.ExecutableNode ) )
n = Gaffer.ExecutableNode()
self.assertTrue( Gaffer.ExecutableNode.isExecutable( n ) )
n = ExecutableNodeTest.MyNode(False)
self.assertTrue( Gaffer.ExecutableNode.isExecutable( ExecutableNodeTest.MyNode ) )
self.assertTrue( Gaffer.ExecutableNode.isExecutable( n ) )
def testExecutionHash( self ) :
c1 = Gaffer.Context()
c1['time'] = 1.0
c2 = Gaffer.Context()
c2['time'] = 2.0
c3 = Gaffer.Context()
c3['time'] = 3.0
n = ExecutableNodeTest.MyNode(False)
taskList = list()
taskList.append( Gaffer.ExecutableNode.Task( n, c1 ) )
taskList.append( Gaffer.ExecutableNode.Task( n, c2 ) )
taskList.append( Gaffer.ExecutableNode.Task( n, c3 ) )
# since the hash is the same, no matter the context, it should return one single task
self.assertEqual( Gaffer.Despatcher._uniqueTasks( taskList ), [ ( Gaffer.ExecutableNode.Task( n, c1 ), [] ) ] )
n2 = ExecutableNodeTest.MyNode(True)
taskList = list()
taskList.append( Gaffer.ExecutableNode.Task( n2, c1 ) )
taskList.append( Gaffer.ExecutableNode.Task( n2, c2 ) )
taskList.append( Gaffer.ExecutableNode.Task( n2, c3 ) )
		# since the hash includes the 'time' each Task is considered different
self.assertEqual( Gaffer.Despatcher._uniqueTasks( taskList ), [ ( Gaffer.ExecutableNode.Task( n2, c1 ), [] ), ( Gaffer.ExecutableNode.Task( n2, c2 ), [] ), ( Gaffer.ExecutableNode.Task( n2, c3 ), [] ) ] )
def testExecutionRequirements( self ) :
"""Test the function executionRequirements and Executable::defaultRequirements """
c1 = Gaffer.Context()
c1['time'] = 1.0
c2 = Gaffer.Context()
c2['time'] = 2.0
n = ExecutableNodeTest.MyNode(True)
n2 = ExecutableNodeTest.MyNode(True)
# make n2 require n
n2["requirements"][0].setInput( n['requirement'] )
self.assertEqual( n.executionRequirements(c1), [] )
self.assertEqual( n2.executionRequirements(c1), [ Gaffer.ExecutableNode.Task( n, c1 ) ] )
self.assertEqual( n2.executionRequirements(c2), [ Gaffer.ExecutableNode.Task( n, c2 ) ] )
# if we ask for executing n2 we should get n followed by n2
self.assertEqual( Gaffer.Despatcher._uniqueTasks( [ Gaffer.ExecutableNode.Task( n2, c1 ) ] ), [ ( Gaffer.ExecutableNode.Task( n, c1 ), [] ), ( Gaffer.ExecutableNode.Task( n2, c1 ), [ Gaffer.ExecutableNode.Task( n, c1 ) ] ) ] )
def testExecute( self ):
n = ExecutableNodeTest.MyNode(False)
n2 = ExecutableNodeTest.MyNode(False)
		# make n2 require n
r1 = Gaffer.Plug( name = "r1" )
n2['requirements'].addChild( r1 )
r1.setInput( n['requirement'] )
despatcher = Gaffer.Despatcher.despatcher("local")
self.assertEqual( n2.executionCount, 0 )
self.assertEqual( n.executionCount, 0 )
despatcher.despatch( [ n2 ] )
self.assertEqual( n2.executionCount, 1 )
self.assertEqual( n.executionCount, 1 )
def testTaskConstructors( self ) :
c = Gaffer.Context()
t = Gaffer.ExecutableNode.Task()
n = Gaffer.OpHolder()
t.node = n
t.context = c
t2 = Gaffer.ExecutableNode.Task( n, c )
t3 = Gaffer.ExecutableNode.Task( t2 )
self.assertEqual( t.node, n )
self.assertEqual( t.context, c )
self.assertEqual( t2.node, n )
self.assertEqual( t2.context, c )
self.assertEqual( t3.node, n )
self.assertEqual( t3.context, c )
def testTaskComparison( self ) :
c = Gaffer.Context()
n = Gaffer.OpHolder()
t1 = Gaffer.ExecutableNode.Task( n, c )
t2 = Gaffer.ExecutableNode.Task()
t2.node = n
t2.context = c
c2 = Gaffer.Context()
c2["a"] = 2
t3 = Gaffer.ExecutableNode.Task( n, c2 )
n2 = Gaffer.OpHolder()
t4 = Gaffer.ExecutableNode.Task( n2, c2 )
self.assertEqual( t1, t1 )
self.assertEqual( t1, t2 )
self.assertEqual( t2, t1 )
self.assertNotEqual( t1, t3 )
self.assertNotEqual( t3, t1 )
self.assertNotEqual( t3, t4 )
self.assertNotEqual( t4, t3 )
def testTaskSet( self ) :
c = Gaffer.Context()
n = Gaffer.OpHolder()
t1 = Gaffer.ExecutableNode.Task( n, c )
t2 = Gaffer.ExecutableNode.Task( n, c )
self.assertEqual( t1, t2 )
c2 = Gaffer.Context()
c2["a"] = 2
t3 = Gaffer.ExecutableNode.Task( n, c2 )
n2 = Gaffer.OpHolder()
t4 = Gaffer.ExecutableNode.Task( n2, c2 )
t5 = Gaffer.ExecutableNode.Task( n2, c )
s = set( [ t1, t2, t3, t4, t4, t4, t1, t2, t4, t3, t2 ] )
self.assertEqual( len(s), 3 )
self.assertEqual( s, set( [ t1, t3, t4 ] ) )
self.assertTrue( t1 in s )
self.assertTrue( t2 in s )
self.assertTrue( t3 in s )
self.assertTrue( t4 in s )
self.assertFalse( t5 in s )
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# Copyright (C) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage jobs in Jenkins server
import os
import sys
import hashlib
import yaml
import json
import xml.etree.ElementTree as XML
from xml.dom import minidom
import jenkins
import re
import pkg_resources
import logging
import copy
import itertools
import fnmatch
from jenkins_jobs.errors import JenkinsJobsException
import traceback
import imp
import pprint
logger = logging.getLogger(__name__)
MAGIC_MANAGE_STRING = "<!-- Managed by Jenkins Job Builder -->"
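# format_item() expands '{name}' placeholders from paramdict (a string that is
# exactly '{name}' returns the raw parameter value) and evaluates any
# '<?expr>' / '<??expr>' segments as Python expressions with eval_params in scope.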
def format_item(item, paramdict, eval_params):
if not isinstance(item, str):
return item
match = re.search(r'^{(.*)}$', item)
if match:
return paramdict.get(match.group(1))
ret = item.format(**paramdict)
while True:
match = re.search(r'(<\?\??)(.*?[^\\])(>)', ret)
if not match:
            break
evalstr = match.group(2).replace(r'\>', '>')
try:
evalresult = eval(evalstr, globals(), copy.copy(eval_params))
if match.group(0) == ret and (isinstance(evalresult, dict) or isinstance(evalresult, list)):
return evalresult
ret = ret[:match.start(1)] + evalresult + ret[match.end(3):]
except Exception as e:
print traceback.format_exc()
desc = "Got exception trying to evaluate expression: %s\nParams are %s" % (
                evalstr, pprint.pformat(eval_params, indent=2))
raise JenkinsJobsException(desc)
return ret
# Python <= 2.7.3's minidom toprettyxml produces broken output by adding
# extraneous whitespace around data. This patches the broken implementation
# with one taken from Python > 2.7.3.
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == minidom.Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s" % (newl))
if sys.version_info[:3] <= (2, 7, 3):
minidom.Element.writexml = writexml
def deep_format(obj, paramdict, eval_params):
"""Apply the paramdict via str.format() to all string objects found within
the supplied obj. Lists and dicts are traversed recursively."""
# YAML serialisation was originally used to achieve this, but that places
# limitations on the values in paramdict - the post-format result must
# still be valid YAML (so substituting-in a string containing quotes, for
# example, is problematic).
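    # Illustrative example: deep_format({'name': '{project}-unit'}, {'project': 'foo'}, {})
    # returns {'name': 'foo-unit'}.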
if isinstance(obj, str):
try:
ret = format_item(obj, paramdict, eval_params)
except KeyError as exc:
missing_key = exc.message
desc = "%s parameter missing to format %s\nGiven: %s" % (
missing_key, obj, paramdict)
raise JenkinsJobsException(desc)
elif isinstance(obj, list):
ret = []
for item in obj:
formattedItem = deep_format(item, paramdict, eval_params)
if isinstance(item, str) and isinstance(formattedItem, list) and item.startswith('<??'):
ret.extend(formattedItem)
else:
ret.append(formattedItem)
elif isinstance(obj, dict):
ret = {}
for item in obj:
ret[format_item(item, paramdict, eval_params)] = deep_format(obj[item], paramdict, eval_params)
else:
ret = obj
return ret
def matches(what, where):
"""
Checks if the given string matches against the given list of glob patterns
    :arg str what: String that we want to test for a match
    :arg list where: list of glob patterns to match against
"""
for pattern in where:
if re.match(fnmatch.translate(pattern), what):
return True
return False
class Context(object):
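    # Wraps one named entry of the top-level 'context' data: it resolves ("folds")
    # that entry's configs, children and parents for the current value of the
    # context variable, picking per-value settings and falling back to 'default'.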
def __init__(self, context_name, data, params):
self.context_name = context_name
self.data = data
self.context = self.data.get('context', {}).get(self.context_name, {})
self.context_var_name = self.context.get('var-name', '')
self.context_var_value = params.get(self.context_var_name, '')
def get_folded_context(self):
folded_configs = self.get_folded_configs()
folded_children = self.get_folded_children()
folded_parents = self.get_folded_parents()
folded_context = {'configs' : folded_configs, 'children' : folded_children, 'parents' : folded_parents}
return folded_context
def fold(self, configs):
expanded_configs = deep_format(configs, {self.context_var_name: self.context_var_value}, {})
folded_configs = {}
if expanded_configs:
for k, v in expanded_configs.items():
value = v.get(self.context_var_value)
                if value is None:
value = v.get('default')
folded_configs[k] = value
return folded_configs
def get_folded_configs(self):
configs = self.context.get('configs', {})
folded_configs = self.fold(configs)
return folded_configs
def get_folded_child(self, child_name, child_configs):
folded_child = self.fold(child_configs)
        if folded_child.get(self.context_var_name) is None:
folded_child[self.context_var_name] = self.context_var_value
child_context = Context(child_name, self.data, {self.context_var_name: folded_child[self.context_var_name]})
folded_child.update(child_context.get_folded_configs())
folded_child['children'] = child_context.get_folded_children()
return folded_child
def get_folded_children(self):
children = self.context.get('children', {})
folded_children = {}
for child_name, child_configs in children.items():
folded_child = self.get_folded_child(child_name, child_configs)
folded_children[child_name] = folded_child
return folded_children
def get_parent_contexts(self):
parent_contexts = {}
all_contexts = self.data.get('context', {})
for context_name, context in all_contexts.items():
for child_name in context.get('children', {}).keys():
if child_name == self.context_name:
parent_contexts[context_name] = context
return parent_contexts
def get_parent_context_var_values(self, parent_context):
child_configs = parent_context['children'][self.context_name]
context_var_value_mapping = child_configs.get(self.context_var_name) if child_configs else None
parent_context_var_values = []
if context_var_value_mapping:
for k, v in context_var_value_mapping.items():
if v == self.context_var_value:
parent_context_var_values.append(k)
else:
parent_context_var_values.append(self.context_var_value)
return parent_context_var_values
def get_folded_parent(self, parent_context_name, parent_context, parent_context_var_value):
parent_context = Context(parent_context_name, self.data, {self.context_var_name : parent_context_var_value})
folded_parent_configs = parent_context.get_folded_configs()
folded_parent_configs[self.context_var_name] = parent_context_var_value
folded_parent_configs['parents'] = parent_context.get_folded_parents()
return folded_parent_configs
def get_folded_parents(self):
folded_parents = {}
for parent_context_name, parent_context in self.get_parent_contexts().items():
parent_context_var_values = self.get_parent_context_var_values(parent_context)
if parent_context_var_values:
folded_parent_list = []
for parent_context_var_value in parent_context_var_values:
folded_parent = self.get_folded_parent(parent_context_name, parent_context, parent_context_var_value)
folded_parent_list.append(folded_parent)
folded_parents[parent_context_name] = folded_parent_list
return folded_parents
class YamlParser(object):
def __init__(self, config=None):
self.registry = ModuleRegistry(config)
self.data = {}
self.jobs = []
def parse(self, fn):
data = yaml.load(open(fn))
if data:
if not isinstance(data, list):
raise JenkinsJobsException(
"The topmost collection in file '{fname}' must be a list,"
" not a {cls}".format(fname=fn, cls=type(data)))
for item in data:
cls, dfn = item.items()[0]
group = self.data.get(cls, {})
if len(item.items()) > 1:
n = None
for k, v in item.items():
if k == "name":
n = v
break
# Syntax error
raise JenkinsJobsException("Syntax error, for item "
"named '{0}'. Missing indent?"
.format(n))
name = dfn['name']
group[name] = dfn
self.data[cls] = group
def getJob(self, name):
job = self.data.get('job', {}).get(name, None)
if not job:
return job
return self.applyDefaults(job)
def getJobGroup(self, name):
return self.data.get('job-group', {}).get(name, None)
def getJobTemplate(self, name):
job = self.data.get('job-template', {}).get(name, None)
if not job:
return job
return self.applyDefaults(job)
def applyDefaults(self, data):
whichdefaults = data.get('defaults', 'global')
defaults = self.data.get('defaults', {}).get(whichdefaults, {})
newdata = {}
newdata.update(defaults)
newdata.update(data)
return newdata
def generateXML(self, jobs_filter=None):
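        # First let modules with a handle_data hook rewrite the parsed data until it
        # stabilises, then emit XML for plain jobs, and finally expand each project's
        # job, job-group and job-template entries.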
changed = True
while changed:
changed = False
for module in self.registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self):
changed = True
for job in self.data.get('job', {}).values():
if jobs_filter and not matches(job['name'], jobs_filter):
logger.debug("Ignoring job {0}".format(job['name']))
continue
logger.debug("XMLifying job '{0}'".format(job['name']))
job = self.applyDefaults(job)
job = deep_format(job, {}, {})
self.getXMLForJob(job)
for project in self.data.get('project', {}).values():
logger.debug("XMLifying project '{0}'".format(project['name']))
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = jobspec.items()[0]
if not isinstance(jobparams, dict):
jobparams = {}
else:
jobname = jobspec
jobparams = {}
job = self.getJob(jobname)
if job:
# Just naming an existing defined job
continue
# see if it's a job group
group = self.getJobGroup(jobname)
if group:
for group_jobspec in group['jobs']:
if isinstance(group_jobspec, dict):
group_jobname, group_jobparams = \
group_jobspec.items()[0]
if not isinstance(group_jobparams, dict):
group_jobparams = {}
else:
group_jobname = group_jobspec
group_jobparams = {}
job = self.getJob(group_jobname)
if job:
continue
template = self.getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = {}
d.update(project)
d.update(jobparams)
d.update(group)
d.update(group_jobparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self.getXMLForTemplateJob(d, template, jobs_filter)
continue
# see if it's a template
template = self.getJobTemplate(jobname)
if template:
d = {}
d.update(project)
d.update(jobparams)
self.getXMLForTemplateJob(d, template, jobs_filter)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(jobname))
def getXMLForTemplateJob(self, project, template, jobs_filter=None):
dimensions = []
for (k, v) in project.items():
if type(v) == list and k not in ['jobs']:
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
checksums = set([])
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params.update(values)
context_name = template.get('context', '')
context_name = context_name.format(**params)
context = Context(context_name, self.data, params)
context_vars = context.get_folded_context()
expanded = deep_format(template, params, context_vars)
# Keep track of the resulting expansions to avoid
# regenerating the exact same job. Whenever a project has
# different values for a parameter and that parameter is not
            # used in the template, we would end up regenerating the exact
            # same job.
            # To achieve that we serialize the expanded template making
            # sure the dict keys are always in the same order. Then we
            # record the checksum in an unordered unique set which lets
            # us guarantee a group of parameters will not be added a
# second time.
uniq = json.dumps(expanded, sort_keys=True)
checksum = hashlib.md5(uniq).hexdigest()
# Lookup the checksum
if checksum not in checksums:
# We also want to skip XML generation whenever the user did
# not ask for that job.
job_name = expanded.get('name')
if jobs_filter and not matches(job_name, jobs_filter):
continue
logger.debug("Generating XML for template job {0}"
" (params {1})".format(
template['name'], params))
self.getXMLForJob(expanded)
checksums.add(checksum)
def getXMLForJob(self, data):
kind = data.get('project-type', 'freestyle')
data["description"] = (data.get("description", "") +
self.get_managed_string()).lstrip()
for ep in pkg_resources.iter_entry_points(
group='jenkins_jobs.projects', name=kind):
Mod = ep.load()
mod = Mod(self.registry)
xml = mod.root_xml(data)
self.gen_xml(xml, data)
job = XmlJob(xml, data['name'])
self.jobs.append(job)
break
def gen_xml(self, xml, data):
for module in self.registry.modules:
if hasattr(module, 'gen_xml'):
module.gen_xml(self, xml, data)
def get_managed_string(self):
# The \n\n is not hard coded into the magic string because the newlines
# get stripped if the project does not otherwise have a description.
return "\n\n" + MAGIC_MANAGE_STRING
class ModuleRegistry(object):
entry_points_cache = {}
def __init__(self, config):
self.modules = []
self.modules_by_component_type = {}
self.handlers = {}
self.global_config = config
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.modules'):
Mod = entrypoint.load()
mod = Mod(self)
self.modules.append(mod)
self.modules.sort(lambda a, b: cmp(a.sequence, b.sequence))
if mod.component_type is not None:
self.modules_by_component_type[mod.component_type] = mod
def registerHandler(self, category, name, method):
cat_dict = self.handlers.get(category, {})
if not cat_dict:
self.handlers[category] = cat_dict
cat_dict[name] = method
def getHandler(self, category, name):
return self.handlers[category][name]
def dispatch(self, component_type,
parser, xml_parent,
component, template_data={}):
"""This is a method that you can call from your implementation of
Base.gen_xml or component. It allows modules to define a type
of component, and benefit from extensibility via Python
entry points and Jenkins Job Builder :ref:`Macros <macro>`.
:arg string component_type: the name of the component
(e.g., `builder`)
:arg YAMLParser parser: the global YAML parser
:arg Element xml_parent: the parent XML element
:arg dict template_data: values that should be interpolated into
the component definition
See :py:class:`jenkins_jobs.modules.base.Base` for how to register
components of a module.
See the Publishers module for a simple example of how to use
this method.
"""
if component_type not in self.modules_by_component_type:
raise JenkinsJobsException("Unknown component type: "
"'{0}'.".format(component_type))
component_list_type = self.modules_by_component_type[component_type] \
.component_list_type
if isinstance(component, dict):
# The component is a singleton dictionary of name: dict(args)
name, component_data = component.items()[0]
else:
# The component is a simple string name, e.g. "run-tests"
name = component
component_data = {}
# Look for a component function defined in an entry point
cache_key = '%s:%s' % (component_list_type, name)
eps = ModuleRegistry.entry_points_cache.get(cache_key)
if eps is None:
eps = list(pkg_resources.iter_entry_points(
group='jenkins_jobs.{0}'.format(component_list_type),
name=name))
if len(eps) > 1:
raise JenkinsJobsException(
"Duplicate entry point found for component type: '{0}',"
"name: '{1}'".format(component_type, name))
elif len(eps) == 1:
ModuleRegistry.entry_points_cache[cache_key] = eps
logger.debug("Cached entry point %s = %s", cache_key,
ModuleRegistry.entry_points_cache[cache_key])
if len(eps) == 1:
func = eps[0].load()
func(parser, xml_parent, component_data)
else:
# Otherwise, see if it's defined as a macro
component = parser.data.get(component_type, {}).get(name)
if component:
expanded = component[component_list_type]
if component_data:
expanded = deep_format(component[component_list_type], component_data, {})
for b in expanded:
# Pass component_data in as template data to this function
# so that if the macro is invoked with arguments,
# the arguments are interpolated into the real defn.
self.dispatch(component_type,
parser, xml_parent, b, component_data)
else:
raise JenkinsJobsException("Unknown entry point or macro '{0}'"
" for component type: '{1}'.".
format(name, component_type))
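# A hypothetical call from a module's gen_xml (the names here are
# illustrative, not part of this codebase):
#
#   for builder in data.get('builders', []):
#       registry.dispatch('builder', parser, xml_builders, builder)
#
# Each entry may be a plain string naming an entry point or macro, or a
# singleton dict mapping that name to its parameters, exactly as handled
# above.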
class XmlJob(object):
def __init__(self, xml, name):
self.xml = xml
self.name = name
def md5(self):
return hashlib.md5(self.output()).hexdigest()
def output(self):
out = minidom.parseString(XML.tostring(self.xml))
return out.toprettyxml(indent=' ', encoding='utf-8')
class CacheStorage(object):
def __init__(self, jenkins_url, flush=False):
cache_dir = self.get_cache_dir()
# One cache per remote Jenkins URL:
host_vary = re.sub('[^A-Za-z0-9\-\~]', '_', jenkins_url)
self.cachefilename = os.path.join(
cache_dir, 'cache-host-jobs-' + host_vary + '.yml')
if flush or not os.path.isfile(self.cachefilename):
self.data = {}
return
with file(self.cachefilename, 'r') as yfile:
self.data = yaml.load(yfile)
logger.debug("Using cache: '{0}'".format(self.cachefilename))
@staticmethod
def get_cache_dir():
home = os.path.expanduser('~')
if home == '~':
raise OSError('Could not locate home folder')
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(home, '.cache')
path = os.path.join(xdg_cache_home, 'jenkins_jobs')
if not os.path.isdir(path):
os.makedirs(path)
return path
def set(self, job, md5):
self.data[job] = md5
yfile = file(self.cachefilename, 'w')
yaml.dump(self.data, yfile)
yfile.close()
def is_cached(self, job):
if job in self.data:
return True
return False
def has_changed(self, job, md5):
if job in self.data and self.data[job] == md5:
return False
return True
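# Sketch of the intended cache flow (mirroring Builder.update_job further
# below): md5 = job.md5(); if cache.has_changed(job.name, md5) the job is
# pushed to Jenkins and cache.set(job.name, md5) records the new checksum,
# so an unchanged job is skipped on the next run.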
class Jenkins(object):
def __init__(self, url, user, password):
self.jenkins = jenkins.Jenkins(url, user, password)
def update_job(self, job_name, xml):
if self.is_job(job_name):
logger.info("Reconfiguring jenkins job {0}".format(job_name))
self.jenkins.reconfig_job(job_name, xml)
else:
logger.info("Creating jenkins job {0}".format(job_name))
self.jenkins.create_job(job_name, xml)
def is_job(self, job_name):
return self.jenkins.job_exists(job_name)
def get_job_md5(self, job_name):
xml = self.jenkins.get_job_config(job_name)
return hashlib.md5(xml).hexdigest()
def delete_job(self, job_name):
if self.is_job(job_name):
logger.info("Deleting jenkins job {0}".format(job_name))
self.jenkins.delete_job(job_name)
def get_jobs(self):
return self.jenkins.get_jobs()
def is_managed(self, job_name):
xml = self.jenkins.get_job_config(job_name)
try:
out = XML.fromstring(xml)
description = out.find(".//description").text
return description.endswith(MAGIC_MANAGE_STRING)
except (TypeError, AttributeError):
pass
return False
class Builder(object):
def __init__(self, jenkins_url, jenkins_user, jenkins_password,
config=None, ignore_cache=False, flush_cache=False):
self.jenkins = Jenkins(jenkins_url, jenkins_user, jenkins_password)
self.cache = CacheStorage(jenkins_url, flush=flush_cache)
self.global_config = config
self.ignore_cache = ignore_cache
def load_files(self, fn):
if os.path.isdir(fn):
files_to_process = [os.path.join(fn, f)
for f in os.listdir(fn)
if (f.endswith('.yml') or f.endswith('.yaml') or f.endswith('.py'))]
else:
files_to_process = [fn]
self.parser = YamlParser(self.global_config)
for in_file in files_to_process:
if in_file.endswith('.yml') or in_file.endswith('.yaml'):
logger.debug("Parsing YAML file {0}".format(in_file))
self.parser.parse(in_file)
else:
logger.debug("Loading python file {0}".format(in_file))
module_name = os.path.splitext(os.path.basename(in_file))[0]
globals()[module_name] = imp.load_source(module_name, in_file)
def delete_old_managed(self, keep):
jobs = self.jenkins.get_jobs()
for job in jobs:
if job['name'] not in keep and \
self.jenkins.is_managed(job['name']):
logger.info("Removing obsolete jenkins job {0}"
.format(job['name']))
self.delete_job(job['name'])
else:
logger.debug("Ignoring unmanaged jenkins job %s",
job['name'])
def delete_job(self, glob_name, fn=None):
if fn:
self.load_files(fn)
self.parser.generateXML(glob_name)
jobs = [j.name
for j in self.parser.jobs
if matches(j.name, [glob_name])]
else:
jobs = [glob_name]
for job in jobs:
self.jenkins.delete_job(job)
if self.cache.is_cached(job):
self.cache.set(job, '')
def delete_all_jobs(self):
jobs = self.jenkins.get_jobs()
for job in jobs:
self.delete_job(job['name'])
def update_job(self, fn, names=None, output_dir=None):
self.load_files(fn)
self.parser.generateXML(names)
self.parser.jobs.sort(lambda a, b: cmp(a.name, b.name))
for job in self.parser.jobs:
if names and not matches(job.name, names):
continue
if output_dir:
if names:
print job.output()
continue
fn = os.path.join(output_dir, job.name)
logger.debug("Writing XML to '{0}'".format(fn))
f = open(fn, 'w')
f.write(job.output())
f.close()
continue
md5 = job.md5()
if (self.jenkins.is_job(job.name)
and not self.cache.is_cached(job.name)):
old_md5 = self.jenkins.get_job_md5(job.name)
self.cache.set(job.name, old_md5)
if self.cache.has_changed(job.name, md5) or self.ignore_cache:
self.jenkins.update_job(job.name, job.output())
self.cache.set(job.name, md5)
else:
logger.debug("'{0}' has not changed".format(job.name))
return self.parser.jobs
|
|
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2019 Jeevan M R <[email protected]>
# Copyright (c) 2019 Dave Jones <[email protected]>
# Copyright (c) 2019 Ben Nuttall <[email protected]>
# Copyright (c) 2018 SteveAmor <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import io
import errno
import warnings
from posix import statvfs_result
from subprocess import CalledProcessError
import pytest
from mock import patch
from gpiozero import *
from datetime import datetime, time
file_not_found = IOError(errno.ENOENT, 'File not found')
bad_ping = CalledProcessError(1, 'returned non-zero exit status 1')
def test_timeofday_bad_init(mock_factory):
with pytest.raises(TypeError):
TimeOfDay()
with pytest.raises(ValueError):
TimeOfDay(7, 12)
with pytest.raises(TypeError):
TimeOfDay(time(7))
with pytest.raises(ValueError):
TimeOfDay(time(7), time(7))
with pytest.raises(ValueError):
TimeOfDay(time(7), time(7))
with pytest.raises(ValueError):
TimeOfDay('7:00', '8:00')
with pytest.raises(ValueError):
TimeOfDay(7.00, 8.00)
with pytest.raises(ValueError):
TimeOfDay(datetime(2019, 1, 24, 19), time(19)) # lurch edge case
def test_timeofday_init(mock_factory):
TimeOfDay(time(7), time(8), utc=False)
TimeOfDay(time(7), time(8), utc=True)
TimeOfDay(time(0), time(23, 59))
TimeOfDay(time(0), time(23, 59))
TimeOfDay(time(12, 30), time(13, 30))
TimeOfDay(time(23), time(1))
TimeOfDay(time(6), time(18))
TimeOfDay(time(18), time(6))
TimeOfDay(datetime(2019, 1, 24, 19), time(19, 1)) # lurch edge case
def test_timeofday_value(mock_factory):
with TimeOfDay(time(7), time(8), utc=False) as tod:
assert repr(tod).startswith('<gpiozero.TimeOfDay object')
assert tod.start_time == time(7)
assert tod.end_time == time(8)
assert not tod.utc
with patch('gpiozero.internal_devices.datetime') as dt:
dt.now.return_value = datetime(2018, 1, 1, 6, 59, 0)
assert not tod.is_active
dt.now.return_value = datetime(2018, 1, 1, 7, 0, 0)
assert tod.is_active
dt.now.return_value = datetime(2018, 1, 2, 8, 0, 0)
assert tod.is_active
dt.now.return_value = datetime(2018, 1, 2, 8, 1, 0)
assert not tod.is_active
with TimeOfDay(time(1, 30), time(23, 30)) as tod:
assert tod.start_time == time(1, 30)
assert tod.end_time == time(23, 30)
assert tod.utc
with patch('gpiozero.internal_devices.datetime') as dt:
dt.utcnow.return_value = datetime(2018, 1, 1, 1, 29, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 1, 30, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 12, 30, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 23, 30, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 23, 31, 0)
assert not tod.is_active
with TimeOfDay(time(23), time(1)) as tod:
with patch('gpiozero.internal_devices.datetime') as dt:
dt.utcnow.return_value = datetime(2018, 1, 1, 22, 59, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 23, 0, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 2, 1, 0, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 2, 1, 1, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 3, 12, 0, 0)
assert not tod.is_active
with TimeOfDay(time(6), time(5)) as tod:
with patch('gpiozero.internal_devices.datetime') as dt:
dt.utcnow.return_value = datetime(2018, 1, 1, 5, 30, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 5, 59, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 6, 0, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 18, 0, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 1, 5, 0, 0)
assert tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 2, 5, 1, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 2, 5, 30, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 2, 5, 59, 0)
assert not tod.is_active
dt.utcnow.return_value = datetime(2018, 1, 2, 6, 0, 0)
assert tod.is_active
def test_pingserver_bad_init(mock_factory):
with pytest.raises(TypeError):
PingServer()
def test_pingserver_init(mock_factory):
with patch('gpiozero.internal_devices.subprocess') as sp:
sp.check_call.return_value = True
with PingServer('example.com') as server:
assert repr(server).startswith('<gpiozero.PingServer object')
assert server.host == 'example.com'
with PingServer('192.168.1.10') as server:
assert server.host == '192.168.1.10'
with PingServer('8.8.8.8') as server:
assert server.host == '8.8.8.8'
with PingServer('2001:4860:4860::8888') as server:
assert server.host == '2001:4860:4860::8888'
def test_pingserver_value(mock_factory):
with patch('gpiozero.internal_devices.subprocess.check_call') as check_call:
with PingServer('example.com') as server:
assert server.is_active
check_call.side_effect = bad_ping
assert not server.is_active
check_call.side_effect = None
assert server.is_active
def test_cputemperature_bad_init(mock_factory):
with patch('io.open') as m:
m.return_value.__enter__.side_effect = file_not_found
with pytest.raises(IOError):
with CPUTemperature('') as temp:
temp.value
with pytest.raises(IOError):
with CPUTemperature('badfile') as temp:
temp.value
m.return_value.__enter__.return_value.readline.return_value = '37000'
with pytest.raises(ValueError):
CPUTemperature(min_temp=100)
with pytest.raises(ValueError):
CPUTemperature(min_temp=10, max_temp=10)
with pytest.raises(ValueError):
CPUTemperature(min_temp=20, max_temp=10)
def test_cputemperature(mock_factory):
with patch('io.open') as m:
m.return_value.__enter__.return_value.readline.return_value = '37000'
with CPUTemperature() as cpu:
assert repr(cpu).startswith('<gpiozero.CPUTemperature object')
assert cpu.temperature == 37.0
assert cpu.value == 0.37
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
with CPUTemperature(min_temp=30, max_temp=40) as cpu:
assert cpu.value == 0.7
assert not cpu.is_active
assert len(w) == 1
assert w[0].category == ThresholdOutOfRange
assert cpu.temperature == 37.0
with CPUTemperature(min_temp=30, max_temp=40, threshold=35) as cpu:
assert cpu.is_active
def test_loadaverage_bad_init(mock_factory):
with patch('io.open') as m:
foo = m.return_value.__enter__
foo.side_effect = file_not_found
with pytest.raises(IOError):
with LoadAverage('') as load:
load.value
with pytest.raises(IOError):
with LoadAverage('badfile') as load:
load.value
foo.return_value.readline.return_value = '0.09 0.10 0.09 1/292 20758'
with pytest.raises(ValueError):
LoadAverage(min_load_average=1)
with pytest.raises(ValueError):
LoadAverage(min_load_average=0.5, max_load_average=0.5)
with pytest.raises(ValueError):
LoadAverage(min_load_average=1, max_load_average=0.5)
with pytest.raises(ValueError):
LoadAverage(minutes=0)
with pytest.raises(ValueError):
LoadAverage(minutes=10)
def test_loadaverage(mock_factory):
with patch('io.open') as m:
foo = m.return_value.__enter__
foo.return_value.readline.return_value = '0.09 0.10 0.09 1/292 20758'
with LoadAverage() as la:
assert repr(la).startswith('<gpiozero.LoadAverage object')
assert la.min_load_average == 0
assert la.max_load_average == 1
assert la.threshold == 0.8
assert la.load_average == 0.1
assert la.value == 0.1
assert not la.is_active
foo.return_value.readline.return_value = '1.72 1.40 1.31 3/457 23102'
with LoadAverage(min_load_average=0.5, max_load_average=2,
threshold=1, minutes=5) as la:
assert la.min_load_average == 0.5
assert la.max_load_average == 2
assert la.threshold == 1
assert la.load_average == 1.4
assert la.value == 0.6
assert la.is_active
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
with LoadAverage(min_load_average=1, max_load_average=2,
threshold=0.8, minutes=5) as la:
assert len(w) == 1
assert w[0].category == ThresholdOutOfRange
assert la.load_average == 1.4
def test_diskusage_bad_init(mock_factory):
with pytest.raises(OSError):
DiskUsage(filesystem='badfilesystem')
def test_diskusage(mock_factory):
with patch('os.statvfs') as statvfs:
statvfs.return_value = statvfs_result((
4096, 4096, 100000, 48000, 48000, 0, 0, 0, 0, 255))
with DiskUsage() as disk:
assert repr(disk).startswith('<gpiozero.DiskUsage object')
assert disk.filesystem == '/'
assert disk.usage == 52.0
assert disk.is_active == False
assert disk.value == 0.52
with DiskUsage(threshold=50.0) as disk:
assert disk.is_active == True
with warnings.catch_warnings(record=True) as w:
warnings.resetwarnings()
with DiskUsage(threshold=125) as disk:
assert disk.threshold == 125
assert not disk.is_active
assert len(w) == 1
assert w[0].category == ThresholdOutOfRange
assert disk.usage == 52.0
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from oslo.config import cfg
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import utils
LOG = logging.getLogger(__name__)
gu_opts = [
cfg.IntOpt(
'grub_timeout',
default=5,
help='Timeout in secs for GRUB'
),
]
CONF = cfg.CONF
CONF.register_opts(gu_opts)
def guess_grub2_conf(chroot=''):
for filename in ('/boot/grub/grub.cfg', '/boot/grub2/grub.cfg'):
if os.path.isdir(os.path.dirname(chroot + filename)):
return filename
def guess_grub2_default(chroot=''):
for filename in ('/etc/default/grub', '/etc/sysconfig/grub'):
if os.path.isfile(chroot + filename):
return filename
def guess_grub2_mkconfig(chroot=''):
for grub_mkconfig in \
('/sbin/grub-mkconfig', '/sbin/grub2-mkconfig',
'/usr/sbin/grub-mkconfig', '/usr/sbin/grub2-mkconfig'):
if os.path.isfile(chroot + grub_mkconfig):
return grub_mkconfig
def guess_grub_version(chroot=''):
grub_install = guess_grub_install(chroot=chroot)
LOG.debug('Trying to run %s --version' % grub_install)
cmd = [grub_install, '--version']
if chroot:
cmd[:0] = ['chroot', chroot]
result = utils.execute(*cmd)
version = 1 if result[0].find('0.97') > 0 else 2
LOG.debug('Looks like grub version is %s' % version)
return version
def guess_grub(chroot=''):
for grub in ('/sbin/grub', '/usr/sbin/grub'):
LOG.debug('Looking for grub: trying %s' % grub)
if os.path.isfile(chroot + grub):
LOG.debug('grub found: %s' % grub)
return grub
raise errors.GrubUtilsError('grub not found')
def guess_grub_install(chroot=''):
for grub_install in ('/sbin/grub-install', '/sbin/grub2-install',
'/usr/sbin/grub-install', '/usr/sbin/grub2-install'):
LOG.debug('Looking for grub-install: trying %s' % grub_install)
if os.path.isfile(chroot + grub_install):
LOG.debug('grub-install found: %s' % grub_install)
return grub_install
raise errors.GrubUtilsError('grub-install not found')
def guess_grub1_datadir(chroot='', arch='x86_64'):
LOG.debug('Looking for grub data directory')
for d in os.listdir(chroot + '/usr/share/grub'):
if arch in d:
LOG.debug('Looks like grub data directory '
'is /usr/share/grub/%s' % d)
return '/usr/share/grub/' + d
def guess_kernel(chroot='', regexp=None):
"""Tries to guess kernel by regexp
:param chroot: Path to chroot
:param regexp: (String) Regular expression (must have python syntax).
Default is r'^vmlinuz.*'
"""
kernel = utils.guess_filename(
path=os.path.join(chroot, 'boot'),
regexp=(regexp or r'^vmlinuz.*'))
if kernel:
return kernel
raise errors.GrubUtilsError('Error while trying to find kernel: '
'regexp=%s' % regexp)
def guess_initrd(chroot='', regexp=None):
"""Tries to guess initrd by regexp
:param chroot: Path to chroot
:param regexp: (String) Regular expression (must have python syntax).
Default is r'^(initrd|initramfs).*'
"""
initrd = utils.guess_filename(
path=os.path.join(chroot, 'boot'),
regexp=(regexp or r'^(initrd|initramfs).*'))
if initrd:
return initrd
raise errors.GrubUtilsError('Error while trying to find initrd: '
'regexp=%s' % regexp)
def grub1_install(install_devices, boot_device, chroot=''):
match = re.search(r'(.+?)(p?)(\d*)$', boot_device)
# Checking whether boot device is a partition
# !!! It must be a partition not a whole disk. !!!
if not match.group(3):
raise errors.GrubUtilsError(
'Error while installing legacy grub: '
'boot device must be a partition')
boot_disk = match.group(1)
boot_part = str(int(match.group(3)) - 1)
grub1_stage1(chroot=chroot)
for install_device in install_devices:
grub1_mbr(install_device, boot_disk, boot_part, chroot=chroot)
def grub1_mbr(install_device, boot_disk, boot_part, chroot=''):
# The device on which we are going to install
# stage1 needs to be mapped as hd0, otherwise the system won't be able to boot.
batch = 'device (hd0) {0}\n'.format(install_device)
# It would be much easier to use grub-install, but unfortunately
# it is not able to install the bootloader on huge disks.
# Instead we set the drive geometry manually to avoid grub
# register overlapping. We set it so as to make grub
# think that the disk size is equal to 1G:
# 130 cylinders * (16065 * 512 = 8225280 bytes) ~= 1G
# We also assume that the boot partition is at the beginning
# of the disk, between 0 and 1G.
batch += 'geometry (hd0) 130 255 63\n'
if boot_disk != install_device:
batch += 'device (hd1) {0}\n'.format(boot_disk)
batch += 'geometry (hd1) 130 255 63\n'
batch += 'root (hd1,{0})\n'.format(boot_part)
else:
batch += 'root (hd0,{0})\n'.format(boot_part)
batch += 'setup (hd0)\n'
batch += 'quit\n'
with open(chroot + '/tmp/grub.batch', 'wb') as f:
LOG.debug('Grub batch content: \n%s' % batch)
f.write(batch)
script = 'cat /tmp/grub.batch | {0} --no-floppy --batch'.format(
guess_grub(chroot=chroot))
with open(chroot + '/tmp/grub.sh', 'wb') as f:
LOG.debug('Grub script content: \n%s' % script)
f.write(script)
os.chmod(chroot + '/tmp/grub.sh', 0o755)
cmd = ['/tmp/grub.sh']
if chroot:
cmd[:0] = ['chroot', chroot]
stdout, stderr = utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
LOG.debug('Grub script stdout: \n%s' % stdout)
LOG.debug('Grub script stderr: \n%s' % stderr)
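# Purely illustrative: grub1_mbr('/dev/sda', '/dev/sda', '0', chroot='')
# would feed grub a batch file containing:
#
#   device (hd0) /dev/sda
#   geometry (hd0) 130 255 63
#   root (hd0,0)
#   setup (hd0)
#   quit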
def grub1_stage1(chroot=''):
LOG.debug('Installing grub stage1 files')
for f in os.listdir(chroot + '/boot/grub'):
if f in ('stage1', 'stage2') or 'stage1_5' in f:
LOG.debug('Removing: %s' % chroot + os.path.join('/boot/grub', f))
os.remove(chroot + os.path.join('/boot/grub', f))
grub1_datadir = guess_grub1_datadir(chroot=chroot)
for f in os.listdir(chroot + grub1_datadir):
if f in ('stage1', 'stage2') or 'stage1_5' in f:
LOG.debug('Copying %s from %s to /boot/grub' % (f, grub1_datadir))
shutil.copy(chroot + os.path.join(grub1_datadir, f),
chroot + os.path.join('/boot/grub', f))
def grub1_cfg(kernel=None, initrd=None,
kernel_params='', chroot='', grub_timeout=CONF.grub_timeout):
if not kernel:
kernel = guess_kernel(chroot=chroot)
if not initrd:
initrd = guess_initrd(chroot=chroot)
config = """
default=0
timeout={grub_timeout}
title Default ({kernel})
kernel /{kernel} {kernel_params}
initrd /{initrd}
""".format(kernel=kernel, initrd=initrd,
kernel_params=kernel_params,
grub_timeout=grub_timeout)
with open(chroot + '/boot/grub/grub.conf', 'wb') as f:
f.write(config)
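# For illustration, grub1_cfg(kernel='vmlinuz-2.6.32',
# initrd='initramfs-2.6.32.img', kernel_params='console=tty0') writes a
# grub.conf roughly like (example values, not defaults):
#
#   default=0
#   timeout=5
#   title Default (vmlinuz-2.6.32)
#   kernel /vmlinuz-2.6.32 console=tty0
#   initrd /initramfs-2.6.32.img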
def grub2_install(install_devices, chroot=''):
grub_install = guess_grub_install(chroot=chroot)
for install_device in install_devices:
cmd = [grub_install, install_device]
if chroot:
cmd[:0] = ['chroot', chroot]
utils.execute(*cmd, run_as_root=True, check_exit_code=[0])
def grub2_cfg(kernel_params='', chroot='', grub_timeout=CONF.grub_timeout):
grub_defaults = chroot + guess_grub2_default(chroot=chroot)
rekerparams = re.compile(r'^.*GRUB_CMDLINE_LINUX=.*')
retimeout = re.compile(r'^.*GRUB_HIDDEN_TIMEOUT=.*')
new_content = ''
with open(grub_defaults) as f:
for line in f:
line = rekerparams.sub(
'GRUB_CMDLINE_LINUX="{kernel_params}"'.
format(kernel_params=kernel_params), line)
line = retimeout.sub('GRUB_HIDDEN_TIMEOUT={grub_timeout}'.
format(grub_timeout=grub_timeout), line)
new_content += line
# NOTE(agordeev): explicitly add the recordfail timeout in order to
# prevent a user confirmation prompt from appearing if an unexpected
# reboot occurred.
new_content += '\nGRUB_RECORDFAIL_TIMEOUT={grub_timeout}\n'.\
format(grub_timeout=grub_timeout)
with open(grub_defaults, 'wb') as f:
f.write(new_content)
cmd = [guess_grub2_mkconfig(chroot), '-o', guess_grub2_conf(chroot)]
if chroot:
cmd[:0] = ['chroot', chroot]
utils.execute(*cmd, run_as_root=True)
|
|
import argparse
from datetime import datetime
import fflogs
import re
def timestamp_type(arg):
"""Defines the timestamp input format"""
if re.match(r'\d{2}:\d{2}:\d{2}\.\d{3}', arg) is None:
raise argparse.ArgumentTypeError("Invalid timestamp format. Use the format 12:34:56.789")
return arg
def parse_time(timestamp):
"""Parses a timestamp into a datetime object"""
return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
def parse_line_time(line):
"""Parses the line's timestamp into a datetime object"""
time = parse_time(line[3:22])
time = time.replace(microsecond=int(line[23:29]))
return time
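# Worked example, assuming the usual ACT network line layout implied by the
# slices above: for a line starting
# '21|2018-02-06T12:30:45.1560000-08:00|...', line[3:22] is
# '2018-02-06T12:30:45' and line[23:29] is '156000', so the parsed time is
# 2018-02-06 12:30:45.156000.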
def parse_report(args):
"""Reads an fflogs report and return a list of entries"""
# Default values
report_start_time = 0
start_time = 0
end_time = 0
enemies = {}
last_ability = start_time
# Get report information
report_data = fflogs.api('fights', args.report, 'www', {'api_key': args.key})
report_start_time = report_data['start']
# Get the start and end timestamps for the specific fight
fight_id_found = False
for fight in report_data['fights']:
if args.fight and fight['id'] == args.fight:
start_time = fight['start_time']
end_time = fight['end_time']
fight_id_found = True
break
elif fight['end_time'] - fight['start_time'] > end_time - start_time:
start_time = fight['start_time']
end_time = fight['end_time']
if args.fight and not fight_id_found:
raise Exception('Fight ID not found in report')
# Build an enemy name list, since these aren't in the events
for enemy in report_data['enemies']:
enemies[enemy['id']] = enemy['name']
# Get the actual event list for the single fight
options = {
'api_key': args.key,
'start': start_time,
'end': end_time,
'filter': 'source.disposition="enemy" and type="cast"',
'translate': 'true',
}
event_data = fflogs.api('events', args.report, 'www', options)
entries = []
# Actually make the entry dicts
for event in event_data['events']:
entry = {
'time': datetime.fromtimestamp((report_start_time + event['timestamp']) / 1000),
'combatant': enemies[event['sourceID']],
'ability_id': hex(event['ability']['guid'])[2:].upper(),
'ability_name': event['ability']['name'],
}
entries.append(entry)
return entries, datetime.fromtimestamp((report_start_time + start_time) / 1000)
def parse_file(args):
"""Reads a file specified by arguments, and returns an entry list"""
entries = []
started = False
with args.file as file:
for line in file:
# Scan the file until the start timestamp
if not started and line[14:26] != args.start:
continue
if line[14:26] == args.end:
break
# We're at the start of the encounter now.
if not started:
started = True
last_ability_time = parse_line_time(line)
# We're looking for enemy casts
# These lines will start with 21 or 22, and have an NPC ID (400#####)
# If this isn't one, skip the line
if not (line[0:2] == '21' or line[0:2] == '22') or not line[37:40] == '400':
continue
# At this point, we have a combat line for the timeline.
line_fields = line.split('|')
entry = {
'time': parse_line_time(line),
'combatant': line_fields[3],
'ability_id': line_fields[4],
'ability_name': line_fields[5],
}
entries.append(entry)
return entries, last_ability_time
def main(args):
"""Starting point for execution with args"""
timeline_position = 0
last_ability_time = 0
# ACT log doesn't include friendly/enemy information per-line, so this is a set of default friendly npcs to filter
npc_combatants = ['Eos', 'Selene', 'Garuda-Egi', 'Titan-Egi', 'Ifrit-Egi', 'Emerald Carbuncle', 'Ruby Carbuncle', 'Rook Autoturret', 'Bishop Autoturret', 'Demi-Bahamut', 'Earthly Star', '']
# Format the phase timings
phases = {}
for phase in args.phase:
ability, time = phase.split(':')
phases[ability] = int(time)
# Get the entry list
if args.report:
entries, start_time = parse_report(args)
elif args.file:
entries, start_time = parse_file(args)
last_ability_time = start_time
last_entry = {'time': 0, 'ability_id': ''}
print('0 "Start"')
for entry in entries:
# First up, check if it's an ignored entry
# Ignore autos, probably need a better rule than this
if entry['ability_name'] == 'Attack':
continue
# Ignore abilities by NPC allies
if entry['combatant'] in npc_combatants:
continue
# Ignore lines by arguments
if (entry['ability_name'] in args.ignore_ability or
entry['ability_id'] in args.ignore_id or
entry['combatant'] in args.ignore_combatant):
continue
# Ignore aoe spam
if entry['time'] == last_entry['time'] and entry['ability_id'] == last_entry['ability_id']:
continue
# Find out how long it's been since our last ability
line_time = entry['time']
last_time_diff = line_time - last_ability_time
last_time_diff_sec = last_time_diff.seconds
last_time_diff_us = last_time_diff.microseconds
drift = False
# Round up to the second
if last_time_diff_us > 800000:
last_time_diff_sec += 1
# Round up with a note about exceptional drift
elif last_time_diff_us > 500000:
last_time_diff_sec += 1
drift = -1000000 + last_time_diff_us
# Round down with a note about exceptional drift
elif last_time_diff_us > 200000:
drift = last_time_diff_us
# If <200ms then there's no need to adjust sec or drift
else:
pass
# Set the time, possibly adjusting to specified phase
if entry['ability_id'] not in phases:
timeline_position += last_time_diff_sec
else:
timeline_position = phases[entry['ability_id']]
del phases[entry['ability_id']]
# Update the last ability time
last_ability_time = line_time
entry['position'] = timeline_position
# Write the line
output_entry = '{position} "{ability_name}" sync /:{combatant}:{ability_id}:/'.format(**entry)
if drift:
output_entry += ' # drift {}'.format(drift/1000000)
print(output_entry)
# Save the entry til the next line for filtering
last_entry = entry
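# Worked example of the rounding above (illustrative numbers): a 12.75s gap
# between abilities rounds up to 13s with a drift note of -0.25s, a 12.3s
# gap keeps 12s and notes +0.3s of drift, and gaps within 0.2s of a whole
# second are treated as exact with no note.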
if __name__ == "__main__":
# Set up all of the arguments
example_usage = """
example:
make_timeline.py -f "%APPDATA%\\Advanced Combat Tracker\\FFXIVLogs\\Network_20180206.log" -s 12:30:45.156 -e 12:43:51.395 -ia Attack Explosion "Vacuum Claw" -ic "Aratama Soul"
Scans Network_20180206.log, starts the encounter at 12:30:45.156, and crawls until
12:43:51.395, collecting enemy ability usages along the way and printing them in a timeline format"""
parser = argparse.ArgumentParser(
description="Creates a timeline from a logged encounter",
epilog=example_usage,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Add main input vector, fflogs report or network log file
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-r', '--report', help="The ID of an FFLogs report")
group.add_argument('-f', '--file', type=argparse.FileType('r', encoding="utf8"), help="The path of the log file")
# Report arguments
parser.add_argument('-k', '--key', help="The FFLogs API key to use, from https://www.fflogs.com/accounts/changeuser")
parser.add_argument('-rf', '--fight', type=int, help="Fight ID of the report to use. Defaults to longest in the report")
# Log file arguments
parser.add_argument('-s', '--start', type=timestamp_type, help="Timestamp of the start, e.g. '12:34:56.789'")
parser.add_argument('-e', '--end', type=timestamp_type, help="Timestamp of the end, e.g. '12:34:56.789'")
# Filtering arguments
parser.add_argument('-ii', '--ignore-id', nargs='*', default=[], help="Ability IDs to ignore, e.g. 27EF")
parser.add_argument('-ia', '--ignore-ability', nargs='*', default=[], help="Ability names to ignore, e.g. Attack")
parser.add_argument('-ic', '--ignore-combatant', nargs='*', default=[], help="Combatant names to ignore, e.g. Aratama Soul")
parser.add_argument('-p', '--phase', nargs='*', default=[], help="Abilities that indicate a new phase, and the time to jump to, e.g. 28EC:1000")
args = parser.parse_args()
# Check dependent args
if args.file and not (args.start and args.end):
raise parser.error("Log file input requires start and end timestamps")
if args.report and not args.key:
raise parser.error("FFlogs parsing requires an API key. Visit https://www.fflogs.com/accounts/changeuser and use the Public key")
# Actually call the script
main(args)
|
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import ryu.exception as ryu_exc
from ryu.base import app_manager
from ryu.controller import event
class RemoteDPIDAlreadyExist(ryu_exc.RyuException):
message = ('port (%(dpid)s, %(port)s) already has '
'remote dpid %(remote_dpid)s')
class TunnelKeyAlreadyExist(ryu_exc.RyuException):
message = 'tunnel key %(tunnel_key)s already exists'
class TunnelKeyNotFound(ryu_exc.RyuException):
message = 'no tunnel key for network %(network_id)s'
class EventTunnelKeyBase(event.EventBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyBase, self).__init__()
self.network_id = network_id
self.tunnel_key = tunnel_key
class EventTunnelKeyAdd(EventTunnelKeyBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyAdd, self).__init__(network_id, tunnel_key)
class EventTunnelKeyDel(EventTunnelKeyBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyDel, self).__init__(network_id, tunnel_key)
class EventTunnelPort(event.EventBase):
def __init__(self, dpid, port_no, remote_dpid, add_del):
super(EventTunnelPort, self).__init__()
self.dpid = dpid
self.port_no = port_no
self.remote_dpid = remote_dpid
self.add_del = add_del
class TunnelKeys(dict):
"""network id(uuid) <-> tunnel key(32bit unsigned int)"""
def __init__(self, f):
super(TunnelKeys, self).__init__()
self.send_event = f
def get_key(self, network_id):
try:
return self[network_id]
except KeyError:
raise TunnelKeyNotFound(network_id=network_id)
def _set_key(self, network_id, tunnel_key):
self[network_id] = tunnel_key
self.send_event(EventTunnelKeyAdd(network_id, tunnel_key))
def register_key(self, network_id, tunnel_key):
if network_id in self:
raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
if tunnel_key in self.values():
raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
self._set_key(network_id, tunnel_key)
def update_key(self, network_id, tunnel_key):
if network_id not in self and tunnel_key in self.values():
raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
key = self.get(network_id)
if key is None:
self._set_key(network_id, tunnel_key)
return
if key != tunnel_key:
raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
def delete_key(self, network_id):
try:
tunnel_key = self[network_id]
self.send_event(EventTunnelKeyDel(network_id, tunnel_key))
del self[network_id]
except KeyError:
raise ryu_exc.NetworkNotFound(network_id=network_id)
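# Sketch with made-up ids: register_key('net-1', 100) stores the mapping and
# emits EventTunnelKeyAdd; a later register_key('net-2', 100) raises
# TunnelKeyAlreadyExist because keys must be unique across networks, and
# delete_key('net-1') emits EventTunnelKeyDel before removing the entry.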
class DPIDs(object):
"""dpid -> port_no -> remote_dpid"""
def __init__(self, f):
super(DPIDs, self).__init__()
self.dpids = collections.defaultdict(dict)
self.send_event = f
def list_ports(self, dpid):
return self.dpids[dpid]
def _add_remote_dpid(self, dpid, port_no, remote_dpid):
self.dpids[dpid][port_no] = remote_dpid
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, True))
def add_remote_dpid(self, dpid, port_no, remote_dpid):
if port_no in self.dpids[dpid]:
raise ryu_exc.PortAlreadyExist(dpid=dpid, port=port_no,
network_id=None)
self._add_remote_dpid(dpid, port_no, remote_dpid)
def update_remote_dpid(self, dpid, port_no, remote_dpid):
remote_dpid_ = self.dpids[dpid].get(port_no)
if remote_dpid_ is None:
self._add_remote_dpid(dpid, port_no, remote_dpid)
elif remote_dpid_ != remote_dpid:
raise RemoteDPIDAlreadyExist(dpid=dpid, port=port_no, remote_dpid=remote_dpid)
def get_remote_dpid(self, dpid, port_no):
try:
return self.dpids[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
def delete_port(self, dpid, port_no):
try:
remote_dpid = self.dpids[dpid][port_no]
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, False))
del self.dpids[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
def get_port(self, dpid, remote_dpid):
try:
dp = self.dpids[dpid]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
res = [port_no for (port_no, remote_dpid_) in dp.items()
if remote_dpid_ == remote_dpid]
assert len(res) <= 1
if len(res) == 0:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
return res[0]
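# Sketch of the mapping kept above (values are made up): after
# add_remote_dpid(0x1, 3, 0x2) and add_remote_dpid(0x1, 4, 0x3),
# list_ports(0x1) -> {3: 0x2, 4: 0x3}, get_remote_dpid(0x1, 3) -> 0x2 and
# get_port(0x1, 0x3) -> 4; each registration also emits an EventTunnelPort.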
class Tunnels(app_manager.RyuApp):
def __init__(self):
super(Tunnels, self).__init__()
self.name = 'tunnels'
self.tunnel_keys = TunnelKeys(self.send_event_to_observers)
self.dpids = DPIDs(self.send_event_to_observers)
def get_key(self, network_id):
return self.tunnel_keys.get_key(network_id)
def register_key(self, network_id, tunnel_key):
self.tunnel_keys.register_key(network_id, tunnel_key)
def update_key(self, network_id, tunnel_key):
self.tunnel_keys.update_key(network_id, tunnel_key)
def delete_key(self, network_id):
self.tunnel_keys.delete_key(network_id)
def list_ports(self, dpid):
return self.dpids.list_ports(dpid).keys()
def register_port(self, dpid, port_no, remote_dpid):
self.dpids.add_remote_dpid(dpid, port_no, remote_dpid)
def update_port(self, dpid, port_no, remote_dpid):
self.dpids.update_remote_dpid(dpid, port_no, remote_dpid)
def get_remote_dpid(self, dpid, port_no):
return self.dpids.get_remote_dpid(dpid, port_no)
def delete_port(self, dpid, port_no):
self.dpids.delete_port(dpid, port_no)
#
# methods for gre tunnel
#
def get_port(self, dpid, remote_dpid):
return self.dpids.get_port(dpid, remote_dpid)
|
|
#!/usr/bin/env python
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Testdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.testdoc
python path/to/robot/testdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`testdoc` and :func:`testdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
from __future__ import with_statement
USAGE = """robot.testdoc -- Robot Framework test data documentation tool
Version: <VERSION>
Usage: python -m robot.testdoc [options] data_sources output_file
Testdoc generates a high level test documentation based on Robot Framework
test data. Generated documentation includes name, documentation and other
metadata of each test suite and test case, as well as the top-level keywords
and their arguments.
Options
=======
-T --title title Set the title of the generated documentation.
Underscores in the title are converted to spaces.
The default title is the name of the top level suite.
-N --name name Override the name of the top level suite.
-D --doc document Override the documentation of the top level suite.
-M --metadata name:value * Set/override metadata of the top level suite.
-G --settag tag * Set given tag(s) to all test cases.
-t --test name * Include tests by name.
-s --suite name * Include suites by name.
-i --include tag * Include tests by tags.
-e --exclude tag * Exclude tests by tags.
-h -? --help Print this help.
All options except --title have exactly the same semantics as the same
options have when executing test cases.
Execution
=========
Data can be given as a single file, directory, or as multiple files and
directories. In all these cases, the last argument must be the file where
to write the output. The output is always created in HTML format.
Testdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). It can be executed as an installed module like
`python -m robot.testdoc` or as a script like `python path/robot/testdoc.py`.
Examples:
python -m robot.testdoc my_test.html testdoc.html
jython -m robot.testdoc -N smoke_tests -i smoke path/to/my_tests smoke.html
ipy path/to/robot/testdoc.py first_suite.txt second_suite.txt output.html
"""
import os.path
import sys
import time
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot import utils
from robot.conf import RobotSettings
from robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, TESTDOC
from robot.parsing import disable_curdir_processing
from robot.running import TestSuiteBuilder
class TestDoc(utils.Application):
def __init__(self):
utils.Application.__init__(self, USAGE, arg_limits=(2,))
def main(self, datasources, title=None, **options):
outfile = utils.abspath(datasources.pop())
suite = TestSuiteFactory(datasources, **options)
self._write_test_doc(suite, outfile, title)
self.console(outfile)
def _write_test_doc(self, suite, outfile, title):
with open(outfile, 'w') as output:
model_writer = TestdocModelWriter(output, suite, title)
HtmlFileWriter(output, model_writer).write(TESTDOC)
@disable_curdir_processing
def TestSuiteFactory(datasources, **options):
settings = RobotSettings(options)
if isinstance(datasources, basestring):
datasources = [datasources]
suite = TestSuiteBuilder().build(*datasources)
suite.configure(**settings.suite_config)
return suite
class TestdocModelWriter(ModelWriter):
def __init__(self, output, suite, title=None):
self._output = output
self._output_path = getattr(output, 'name', None)
self._suite = suite
self._title = title.replace('_', ' ') if title else suite.name
def write(self, line):
self._output.write('<script type="text/javascript">\n')
self.write_data()
self._output.write('</script>\n')
def write_data(self):
generated_time = time.localtime()
model = {
'suite': JsonConverter(self._output_path).convert(self._suite),
'title': self._title,
'generated': utils.format_time(generated_time, gmtsep=' '),
'generatedMillis': long(time.mktime(generated_time) * 1000)
}
JsonWriter(self._output).write_json('testdoc = ', model)
class JsonConverter(object):
def __init__(self, output_path=None):
self._output_path = output_path
def convert(self, suite):
return self._convert_suite(suite)
def _convert_suite(self, suite):
return {
'source': suite.source or '',
'relativeSource': self._get_relative_source(suite.source),
'id': suite.id,
'name': self._escape(suite.name),
'fullName': self._escape(suite.longname),
'doc': self._html(suite.doc),
'metadata': [(self._escape(name), self._html(value))
for name, value in suite.metadata.items()],
'numberOfTests': suite.test_count,
'suites': self._convert_suites(suite),
'tests': self._convert_tests(suite),
'keywords': list(self._convert_keywords(suite))
}
def _get_relative_source(self, source):
if not source or not self._output_path:
return ''
return utils.get_link_path(source, os.path.dirname(self._output_path))
def _escape(self, item):
return utils.html_escape(item)
def _html(self, item):
return utils.html_format(utils.unescape(item))
def _convert_suites(self, suite):
return [self._convert_suite(s) for s in suite.suites]
def _convert_tests(self, suite):
return [self._convert_test(t) for t in suite.tests]
def _convert_test(self, test):
return {
'name': self._escape(test.name),
'fullName': self._escape(test.longname),
'id': test.id,
'doc': self._html(test.doc),
'tags': [self._escape(t) for t in test.tags],
'timeout': self._get_timeout(test.timeout),
'keywords': list(self._convert_keywords(test))
}
def _convert_keywords(self, item):
for kw in getattr(item, 'keywords', []):
if kw.type == 'setup':
yield self._convert_keyword(kw, 'SETUP')
elif kw.type == 'teardown':
yield self._convert_keyword(kw, 'TEARDOWN')
elif kw.is_for_loop():
yield self._convert_for_loop(kw)
else:
yield self._convert_keyword(kw, 'KEYWORD')
def _convert_for_loop(self, kw):
return {
'name': self._escape(self._get_for_loop(kw)),
'arguments': '',
'type': 'FOR'
}
def _convert_keyword(self, kw, kw_type):
return {
'name': self._escape(self._get_kw_name(kw)),
'arguments': self._escape(', '.join(kw.args)),
'type': kw_type
}
def _get_kw_name(self, kw):
if kw.assign:
return '%s = %s' % (', '.join(a.rstrip('= ') for a in kw.assign), kw.name)
return kw.name
def _get_for_loop(self, kw):
joiner = ' IN RANGE ' if kw.range else ' IN '
return ', '.join(kw.vars) + joiner + utils.seq2str2(kw.items)
def _get_timeout(self, timeout):
if timeout is None:
return ''
try:
tout = utils.secs_to_timestr(utils.timestr_to_secs(timeout.value))
except ValueError:
tout = timeout.value
if timeout.message:
tout += ' :: ' + timeout.message
return tout
def testdoc_cli(arguments):
"""Executes `Testdoc` similarly as from the command line.
:param arguments: command line arguments as a list of strings.
For programmatic usage the :func:`testdoc` function is typically better. It
has a better API for that and does not call :func:`sys.exit` like
this function.
Example::
from robot.testdoc import testdoc_cli
testdoc_cli(['--title', 'Test Plan', 'mytests', 'plan.html'])
"""
TestDoc().execute_cli(arguments)
def testdoc(*arguments, **options):
"""Executes `Testdoc` programmatically.
Arguments and options have same semantics, and options have same names,
as arguments and options to Testdoc.
Example::
from robot.testdoc import testdoc
testdoc('mytests', 'plan.html', title='Test Plan')
"""
TestDoc().execute(*arguments, **options)
if __name__ == '__main__':
testdoc_cli(sys.argv[1:])
|
|
# -*- coding: utf-8 -*-
from classytags.arguments import Argument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from cms.constants import PUBLISHER_STATE_PENDING
from cms.toolbar.utils import get_plugin_toolbar_js
from cms.utils.admin import render_admin_rows
from sekizai.helpers import get_varname
from django import template
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
register = template.Library()
CMS_ADMIN_ICON_BASE = "%sadmin/img/" % settings.STATIC_URL
@register.simple_tag(takes_context=True)
def show_admin_menu_for_pages(context, pages):
request = context['request']
if 'cl' in context:
filtered = context['cl'].is_filtered or context['cl'].query
else:
filtered = False
content = render_admin_rows(
request,
pages=pages,
site=context['cms_current_site'],
filtered=filtered,
language=context['preview_language'],
)
return mark_safe(content)
class TreePublishRow(Tag):
name = "tree_publish_row"
options = Options(
Argument('page'),
Argument('language')
)
def render_tag(self, context, page, language):
if page.is_published(language) and page.publisher_public_id and page.publisher_public.is_published(language):
if page.is_dirty(language):
cls = "cms-pagetree-node-state cms-pagetree-node-state-dirty dirty"
text = _("unpublished changes")
else:
cls = "cms-pagetree-node-state cms-pagetree-node-state-published published"
text = _("published")
else:
page_languages = page.get_languages()
if language in page_languages:
public_pending = page.publisher_public_id and page.publisher_public.get_publisher_state(
language) == PUBLISHER_STATE_PENDING
if public_pending or page.get_publisher_state(
language) == PUBLISHER_STATE_PENDING:
cls = "cms-pagetree-node-state cms-pagetree-node-state-unpublished-parent unpublishedparent"
text = _("unpublished parent")
else:
cls = "cms-pagetree-node-state cms-pagetree-node-state-unpublished unpublished"
text = _("unpublished")
else:
cls = "cms-pagetree-node-state cms-pagetree-node-state-empty empty"
text = _("no content")
return mark_safe(
'<span class="cms-hover-tooltip cms-hover-tooltip-left cms-hover-tooltip-delay %s" '
'data-cms-tooltip="%s"></span>' % (cls, force_text(text)))
register.tag(TreePublishRow)
@register.filter
def is_published(page, language):
if page.is_published(language) and page.publisher_public_id and page.publisher_public.is_published(language):
return True
else:
page_languages = page.get_languages()
if language in page_languages and page.publisher_public_id and page.publisher_public.get_publisher_state(
language) == PUBLISHER_STATE_PENDING:
return True
return False
@register.filter
def is_dirty(page, language):
return page.is_dirty(language)
@register.filter
def all_ancestors_are_published(page, language):
"""
Returns False if any of the ancestors of page (and language) are
unpublished, otherwise True.
"""
page = page.parent
while page:
if not page.is_published(language):
return False
page = page.parent
return True
class CleanAdminListFilter(InclusionTag):
"""
Used in admin to display only those users that have actually edited a page,
and not everybody.
"""
name = 'clean_admin_list_filter'
template = 'admin/cms/page/tree/filter.html'
options = Options(
Argument('cl'),
Argument('spec'),
)
def get_context(self, context, cl, spec):
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title, 'choices': unique_choices}
register.tag(CleanAdminListFilter)
@register.filter
def boolean_icon(value):
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
return mark_safe(
u'<img src="%sicon-%s.gif" alt="%s" />' % (CMS_ADMIN_ICON_BASE, BOOLEAN_MAPPING.get(value, 'unknown'), value))
@register.filter
def preview_link(page, language):
if settings.USE_I18N:
# Which one of page.get_slug() and page.get_path() is the right
# one to use in this block? They both seem to return the same thing.
try:
# attempt to retrieve the localized path/slug and return
return page.get_absolute_url(language, fallback=False)
except:
# no localized path/slug. therefore nothing to preview. stay on the same page.
# perhaps the user should somehow be notified of this.
return ''
return page.get_absolute_url(language)
class PageSubmitRow(InclusionTag):
name = 'page_submit_row'
template = 'admin/cms/page/submit_row.html'
def get_context(self, context):
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
basic_info = context.get('basic_info', False)
advanced_settings = context.get('advanced_settings', False)
change_advanced_settings = context.get('can_change_advanced_settings', False)
language = context.get('language', '')
filled_languages = context.get('filled_languages', [])
show_buttons = language in filled_languages
if show_buttons:
show_buttons = (basic_info or advanced_settings) and change_advanced_settings
context = {
# TODO check this (old code: opts.get_ordered_objects() )
'onclick_attrib': (opts and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': False,
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': False,
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'basic_info_active': basic_info,
'advanced_settings_active': advanced_settings,
'show_buttons': show_buttons,
'show_save': True,
'language': language,
'language_is_filled': language in filled_languages,
'object_id': context.get('object_id', None)
}
return context
register.tag(PageSubmitRow)
def in_filtered(seq1, seq2):
return [x for x in seq1 if x in seq2]
in_filtered = register.filter('in_filtered', in_filtered)
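# Illustrative template usage (variable names are hypothetical):
# {{ page_languages|in_filtered:filled_languages }} keeps only the items
# present in both sequences, preserving the order of the first.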
@register.simple_tag
def admin_static_url():
"""
If set, returns the string contained in the setting ADMIN_MEDIA_PREFIX, otherwise returns STATIC_URL + 'admin/'.
"""
return getattr(settings, 'ADMIN_MEDIA_PREFIX', None) or ''.join([settings.STATIC_URL, 'admin/'])
class CMSAdminIconBase(Tag):
name = 'cms_admin_icon_base'
def render_tag(self, context):
return CMS_ADMIN_ICON_BASE
register.tag(CMSAdminIconBase)
@register.simple_tag(takes_context=True)
def render_plugin_toolbar_config(context, plugin):
content_renderer = context['cms_content_renderer']
instance, plugin_class = plugin.get_plugin_instance()
if not instance:
return ''
with context.push():
content = content_renderer.render_editable_plugin(
instance,
context,
plugin_class,
)
# render_editable_plugin will populate the plugin
# parents and children cache.
placeholder_cache = content_renderer.get_rendered_plugins_cache(instance.placeholder)
toolbar_js = get_plugin_toolbar_js(
instance,
request_language=content_renderer.request_language,
children=placeholder_cache['plugin_children'][instance.plugin_type],
parents=placeholder_cache['plugin_parents'][instance.plugin_type],
)
varname = get_varname()
toolbar_js = '<script>{}</script>'.format(toolbar_js)
# Add the toolbar javascript for this plugin to the
# sekizai "js" namespace.
context[varname]['js'].append(toolbar_js)
return mark_safe(content)
@register.inclusion_tag('admin/cms/page/plugin/submit_line.html', takes_context=True)
def submit_row_plugin(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'show_delete_link': context.get('has_delete_permission', False) and change and context.get('show_delete', True),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
'preserved_filters': context.get('preserved_filters'),
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
|
|
# Copyright (c) 2015 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of functions for
- Addition and removal of exports
- Management of client permissions on export locations
"""
import errno
import functools
import io
import json
import logging
import os
import subprocess
import time
try:
import ConfigParser as configparser
except ImportError:
import configparser
from scality_manila_utils import utils
from scality_manila_utils.exceptions import (ClientExistsException,
ClientNotFoundException,
EnvironmentException,
ExportException,
ExportAlreadyExists,
ExportNotFoundException,
ExportHasGrantsException)
log = logging.getLogger(__name__)
# From http://prosseek.blogspot.fr/2012/10/
# reading-ini-file-into-dictionary-in.html
class SmbConfParser(configparser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
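# A sketch of the structure returned by SmbConfParser.as_dict(), assuming the
# Samba registry defines a single share named "myshare" (values are
# hypothetical):
#   {'myshare': {'path': '/sofs/myshare',
#                'hosts allow': '127.0.0.1',
#                'hosts deny': '0.0.0.0/0'}}
# Section names map to dicts of parameter name -> value, all as strings.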
def _get_defined_exports():
"""Retrieve all defined exports from the Samba registry."""
with utils.elevated_privileges():
cmd = ['net', 'conf', 'list']
msg = ("Something went wrong while dumping the Samba "
"registry: stdout='{stdout}', stderr='{stderr}'")
stdout, stderr = utils.execute(cmd, msg)
config = SmbConfParser()
output = stdout.replace('\t', '')
config.readfp(io.StringIO(output))
return config.as_dict()
def verify_environment(root_export):
"""
Preliminary checks for installed binaries and running services.
:param root_export: SOFS directory which holds the export points exposed
through manila
:type root_export: string (unicode)
:raises:
:py:class:`scality_manila_utils.exceptions.EnvironmentException`
if the environment is not ready
"""
if not utils.is_stored_on_sofs(root_export):
raise EnvironmentException("%s doesn't seem to be stored on a SOFS "
"filesystem" % root_export)
env_path = os.getenv('PATH').split(':')
for binary in ('net', 'sfused'):
utils.binary_check(binary, env_path)
for process in ('sfused', 'smbd'):
utils.process_check(process)
with io.open('/etc/samba/smb.conf') as f:
# We can't use `for line in f` here because it seems unmockable...
for line in f.readlines():
if line.strip() == 'registry shares = yes':
break
else:
msg = ("You must enable 'registry shares' in your Samba "
"configuration: add 'registry shares = yes' in the [global]"
" section.")
raise EnvironmentException(msg)
def ensure_environment(f):
"""
Decorator function which verifies that expected services are running etc.
"""
@functools.wraps(f)
def wrapper(root_export, *args, **kwargs):
verify_environment(root_export)
return f(root_export=root_export, *args, **kwargs)
return wrapper
def ensure_export_exists(f):
"""
Decorator function which verifies that a given export exists and pass
the `dict` of all defined exports to the decorated function.
"""
@functools.wraps(f)
def wrapper(export_name, *args, **kwargs):
exports = _get_defined_exports()
if export_name not in exports:
msg = "Share '{0:s}' not found in Samba registry.".format(
export_name)
raise ExportNotFoundException(msg)
return f(export_name=export_name, exports=exports, *args, **kwargs)
return wrapper
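# A sketch of how the two decorators above compose on the public functions
# below, e.g.:
#   @ensure_environment
#   @ensure_export_exists
#   def get_export(export_name, exports, *args, **kwargs): ...
# A call first verifies the environment (root_export), then resolves the
# share in the Samba registry and injects the `exports` dict as a keyword
# argument before the decorated function body runs.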
@ensure_environment
@ensure_export_exists
def get_export(export_name, exports, *args, **kwargs):
"""
Retrieve client details of an export.
:param export_name: name of export
:type export_name: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
:returns: string with export client details in json format
"""
export = exports[export_name]
clients = dict((host, ["rw"]) for host in export['hosts allow'].split())
return json.dumps(clients)
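# Example of the JSON returned by get_export() for a share whose
# 'hosts allow' is "10.0.0.1 10.0.0.2" (hypothetical addresses):
#   '{"10.0.0.1": ["rw"], "10.0.0.2": ["rw"]}'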
@ensure_environment
def add_export(root_export, export_name, *args, **kwargs):
"""
Add an export.
:param root_export: SOFS directory which holds the export points exposed
through manila
:type root_export: string (unicode)
:param export_name: name of export to add
:type export_name: string (unicode)
"""
if not export_name or '/' in export_name:
raise ExportException('Invalid export name')
export_point = os.path.join(root_export, export_name)
create_cmd = [
'net', 'conf', 'addshare', export_name, export_point,
'writeable=y', 'guest_ok=y',
]
parameters = {
'browseable': 'yes',
'create mask': '0755',
'hosts deny': '0.0.0.0/0', # deny all by default
'hosts allow': '127.0.0.1',
'read only': 'no',
}
set_of_commands = [['net', 'conf', 'setparm', export_name,
param, value] for param, value in parameters.items()]
with utils.elevated_privileges():
try:
os.mkdir(export_point)
# On some systems, the `mode` argument of mkdir is ignored.
# So be safe, and do an explicit chmod.
os.chmod(export_point, 0o0777)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
else:
log.debug("The share/directory %s already exists on SOFS",
export_name)
exports = _get_defined_exports()
if export_name in exports:
msg = ("Share '{0:s}' already defined in Samba "
"registry.".format(export_name))
raise ExportAlreadyExists(msg)
subprocess.check_call(create_cmd)
for cmd in set_of_commands:
subprocess.check_call(cmd)
@ensure_environment
@ensure_export_exists
def wipe_export(root_export, export_name, exports):
"""
Remove an export.
The export point is not actually removed, but renamed with the prefix
"TRASH-".
:param root_export: SOFS directory which holds the export points exposed
through manila
:type root_export: string (unicode)
:param export_name: name of export to remove
:type export_name: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
"""
export = exports[export_name]
export_path = os.path.join(root_export, export_name)
# Wipe export if and only if no "external host" has access to it
if export['hosts allow'] not in ['', '127.0.0.1']:
raise ExportHasGrantsException('Unable to remove export with grants')
    # We need to introduce a "variable" part (i.e. a date) in case an export
    # with the same name is deleted twice.
tombstone = u'TRASH-{0:s}-{1:s}'.format(export_name,
time.strftime("%Y-%b-%d-%X-%Z"))
tombstone_path = os.path.join(root_export, tombstone)
with utils.elevated_privileges():
log.info("Deleting the export '%s' from the Samba registry",
export_name)
cmd = ['net', 'conf', 'delshare', export_name]
msg = ("Something went wrong while deleting the export {0:s}: "
"stdout={{stdout}}, stderr={{stderr}}").format(export_name)
utils.execute(cmd, msg)
log.info("Renaming export '%s' to '%s'", export_name, tombstone)
try:
os.rename(export_path, tombstone_path)
except OSError as exc:
log.error("Unable to rename '%s' for removal : %r",
export_name, exc)
            # Two concurrent wipe_export calls could happen at the same time,
            # so the loser of the race could see an ENOENT.
if exc.errno != errno.ENOENT:
raise
# Persisting the parent of the moved directory is required, as
# it keeps track of its contents.
utils.fsync_path(root_export)
def _set_hosts_allow(export_name, hosts_allow):
"""
Set the `hosts_allow` parameter for a given share.
:param export_name: name of export to grant access to
:type export_name: string (unicode)
:param hosts_allow: hosts allowed on this share
:type hosts_allow: iterable of `str`
"""
cmd = ['net', 'conf', 'setparm', export_name,
'hosts allow', ' '.join(hosts_allow)]
msg = ("Something went wrong while setting '{0!r}' as "
"the list of 'hosts allow' for share '{1:s}': stdout={{stdout}}, "
"stderr={{stderr}}").format(hosts_allow, export_name)
with utils.elevated_privileges():
utils.execute(cmd, msg)
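# For example, granting 10.0.0.1 on a share that already allows 127.0.0.1
# runs a command equivalent to (hypothetical host):
#   net conf setparm <export_name> 'hosts allow' '127.0.0.1 10.0.0.1'
# under elevated privileges.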
@ensure_environment
@ensure_export_exists
def grant_access(export_name, host, exports, *args, **kwargs):
"""
Grant access for a host to an export.
:param export_name: name of export to grant access to
:type export_name: string (unicode)
:param host: host to grant access for
:type host: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
"""
hosts_allow = exports[export_name]['hosts allow'].split()
if host in hosts_allow:
msg = "Host '{0:s}' already allowed on share '{1:s}'".format(
host, export_name)
raise ClientExistsException(msg)
hosts_allow.append(host)
_set_hosts_allow(export_name, hosts_allow)
@ensure_environment
@ensure_export_exists
def revoke_access(export_name, host, exports, *args, **kwargs):
"""
Revoke access for a host to an export.
:param export_name: name of export for revocation
:type export_name: string (unicode)
:param host: host to revoke access for
:type host: string (unicode)
:param exports: all the defined shares in the Samba registry
:type exports: dictionary
"""
hosts_allow = exports[export_name]['hosts allow'].split()
if host not in hosts_allow:
raise ClientNotFoundException("'{0:s}' has no access defined on share "
"'{1:s}'".format(host, export_name))
hosts_allow.remove(host)
_set_hosts_allow(export_name, hosts_allow)
|
|
#!/usr/bin/env python
#
# Copyright 2016, 2017 IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import os
import signal
import sys
import yaml
import netaddr
OSA_USER_CFG_FILE = 'openstack_user_config.yml'
OSA_USER_VAR_HAPROXY = 'user_var_haproxy.yml'
OSA_USER_VAR_RABBITMQ = 'user_var_rabbitmq.yml'
OSA_USER_VAR_CEILOMETER = 'user_var_ceilometer.yml'
OSA_USER_VAR_CEPH = 'user_var_ceph.yml'
# deployment_environment_variables go into user_var_deploy_env.yml.
# OpenStack-Ansible applies these environment variables during deployment
# phase only. These environment variables are not available in hosts
# and containers once the deployment is completed.
OSA_USER_VAR_DEPLOY_ENV = 'user_var_deploy_env.yml'
SWIFT_MINIMUM_HARDWARE = 'swift-minimum-hardware'
SWIFT = 'swift'
PRIVATE_COMPUTE_CLOUD = 'private-compute-cloud'
DBAAS_REF_CLOUD = 'dbaas'
ARCHITECTURE = 'architecture'
X86_64 = 'x86_64'
PPC64 = 'ppc64'
PPC64LE = 'ppc64le'
REPO_INFRA_HOSTS = 'repo-infra_hosts'
OS_INFRA_HOSTS = 'os-infra_hosts'
SHARED_INFRA_HOSTS = 'shared-infra_hosts'
IDENTITY_HOSTS = 'identity_hosts'
COMPUTE_HOSTS = 'compute_hosts'
SWIFT_PROXY_HOSTS = 'swift-proxy_hosts'
SWIFT_HOSTS = 'swift_hosts'
class OSAFileGenerator(object):
"""Class for generating various OSA configuration files."""
def __init__(self, inventory_name, output_dir):
"""Initializer.
:param inventory_name: Name of a genesis inventory file.
:param output_dir: Directory to which files will be generated.
"""
super(OSAFileGenerator, self).__init__()
self.inventory_name = inventory_name
self.output_dir = output_dir
self.gen_dict = {}
self.user_config = {}
def _load_yml(self):
with open(self.inventory_name, 'r') as stream:
try:
self.gen_dict = yaml.safe_load(stream)
except yaml.YAMLError:
raise
def _dump_yml(self, data, fname):
fname = os.path.join(self.output_dir, fname)
with open(fname, 'w') as stream:
try:
yaml.dump(data, stream, explicit_start=True,
default_flow_style=False)
except yaml.YAMLError:
raise
def _configure_cidr_networks(self):
"""Configure the CIDR networks."""
networks = self.gen_dict.get('networks', None)
if not networks:
return
cidr = {}
mgmt_network = networks.get('openstack-mgmt', None)
if mgmt_network:
cidr['container'] = mgmt_network.get('addr', 'N/A')
stg_network = networks.get('openstack-stg', None)
if stg_network:
cidr['storage'] = stg_network.get('addr', 'N/A')
tenant_network = networks.get('openstack-tenant-vxlan', None)
if tenant_network:
cidr['tunnel'] = tenant_network.get('addr', 'N/A')
swift_repl_network = networks.get('swift-replication', None)
if swift_repl_network:
cidr['swift_repl'] = swift_repl_network.get('addr', 'N/A')
self.user_config['cidr_networks'] = cidr
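    # A sketch of the resulting section in openstack_user_config.yml, assuming
    # the inventory defines all four networks (addresses are hypothetical):
    #   cidr_networks:
    #     container: 172.29.236.0/22
    #     storage: 172.29.244.0/22
    #     tunnel: 172.29.240.0/22
    #     swift_repl: 172.29.248.0/22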
    def is_host_type_of_role(self, role, node_template):
        """Check if the given node_template has the given role."""
        return role in node_template.get('roles', [])
def _get_nodes_through_roles(self, role):
""" Go through all node-templates and get all the host types which
have the given role. This method looks at roles only. It
doesn't look for sections like "controllers", "computes", etc
under nodes unless "controllers" or "compute" node-template has
controller/compute role defined.
"""
hosts = [] # hosts of given role
# Find the node-templates section.
node_templates = self.gen_dict.get('node-templates', None)
nodes = self.gen_dict.get('nodes', None)
if not node_templates or not nodes:
return hosts
# Go through all the node-templates and look for the given role
for host_type, template in node_templates.iteritems():
if self.is_host_type_of_role(role, template):
# Add nodes of this host type to hosts list
hosts.extend(nodes.get(host_type, []))
return hosts
def _get_nodes_for_role(self, role):
nodes = self.gen_dict.get('nodes', {})
nodes_for_role = []
nodes_for_role.extend(nodes.get(role, []))
by_role = self._get_nodes_through_roles(role)
for node in by_role:
if node not in nodes_for_role:
nodes_for_role.append(node)
return nodes_for_role
def _get_controllers(self):
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return None
controllers_list1 = nodes.get('controllers', [])
controllers_list2 = self._get_nodes_through_roles('controller')
for node in controllers_list1:
if node not in controllers_list2:
controllers_list2.append(node)
return controllers_list2
def _configure_infra_hosts(self):
"""Configure the infra hosts."""
controllers = self._get_controllers()
if not controllers:
return
# Build a list of all of the controllers / ips
hosts = {}
for controller in controllers:
hostname = controller.get('hostname', None)
if hostname:
hosts[hostname] = {
'ip': controller.get('openstack-mgmt-addr', 'N/A')
}
        # Set all the common services, shared across all the controllers,
        # that provide the minimal control plane.
self.user_config[SHARED_INFRA_HOSTS] = hosts
self.user_config[REPO_INFRA_HOSTS] = copy.deepcopy(hosts)
self.user_config[IDENTITY_HOSTS] = copy.deepcopy(hosts)
self.user_config['dashboard_hosts'] = copy.deepcopy(hosts)
self.user_config['haproxy_hosts'] = copy.deepcopy(hosts)
self.user_config['log_hosts'] = copy.deepcopy(hosts)
if PRIVATE_COMPUTE_CLOUD in self.get_ref_arch():
# Private compute cloud adds additional services to the
# control plane.
self.user_config['storage-infra_hosts'] = copy.deepcopy(hosts)
self.user_config['network_hosts'] = copy.deepcopy(hosts)
self.user_config['image_hosts'] = copy.deepcopy(hosts)
self.user_config['compute-infra_hosts'] = copy.deepcopy(hosts)
self.user_config['orchestration_hosts'] = copy.deepcopy(hosts)
if DBAAS_REF_CLOUD in self.get_ref_arch():
self.user_config['trove-infra_hosts'] = copy.deepcopy(hosts)
return
@staticmethod
def _get_address(addr_cidr):
# Given 'N/A' or '1.2.3.4/22' return the address only part
if addr_cidr == 'N/A':
return addr_cidr
else:
return addr_cidr.split('/')[0]
def _configure_global_overrides(self):
"""Configure the global overrides section."""
net_mgmt = net_stg = net_tunnel = net_vlan = None
br_mgmt = br_tunnel = br_stg = br_vlan = None
networks = self.gen_dict.get('networks', None)
if not networks:
return
net_mgmt = networks.get('openstack-mgmt', None)
if net_mgmt:
br_mgmt = net_mgmt.get('bridge', 'N/A')
net_stg = networks.get('openstack-stg', None)
if net_stg:
br_stg = net_stg.get('bridge', 'N/A')
ref_arch_list = self.get_ref_arch()
if PRIVATE_COMPUTE_CLOUD in ref_arch_list:
net_tunnel = networks.get('openstack-tenant-vxlan', None)
net_vlan = networks.get('openstack-tenant-vlan', None)
if net_vlan:
br_vlan = net_vlan.get('bridge', 'N/A')
controllers = self._get_controllers()
e_addr = self._get_external_vip_value()
if len(controllers) > 1:
i_addr = self.gen_dict.get('internal-floating-ipaddr', 'N/A')
else:
            # There is only one controller, so don't use the external vip;
            # use the controller's ip address for haproxy instead.
i_addr = controllers[0].get('openstack-mgmt-addr', 'N/A')
e_net, e_net_details = self.find_external_network(e_addr,
networks)
            # Now substitute the controller's address
e_addr = controllers[0].get(e_net + '-addr', 'N/A')
self.user_config['global_overrides'] = {
# Set the load balancing addresses
# They are in the form 1.2.3.4/22, we only need the address here
'internal_lb_vip_address': self._get_address(i_addr),
'external_lb_vip_address': self._get_address(e_addr),
'management_bridge': br_mgmt,
}
if net_tunnel:
br_tunnel = net_tunnel.get('bridge', 'N/A')
self.user_config['global_overrides']['tunnel_bridge'] = br_tunnel
# provider networks
networks = []
mgmt_network = {
'container_bridge': br_mgmt,
'container_type': 'veth',
'container_interface': 'eth1',
'ip_from_q': 'container',
'type': 'raw',
'group_binds': [
'all_containers',
'hosts'
],
'is_container_address': True,
'is_ssh_address': True
}
if 'mtu' in net_mgmt:
mgmt_network['container_mtu'] = net_mgmt.get('mtu')
if DBAAS_REF_CLOUD in ref_arch_list:
mgmt_network['type'] = 'flat'
mgmt_network['host_bind_override'] = net_mgmt.get('bridge-port')
mgmt_network['net_name'] = 'infra'
networks.append({'network': mgmt_network})
if net_stg:
storage_network = {
'container_bridge': br_stg,
'container_type': 'veth',
'container_interface': 'eth2',
'ip_from_q': 'storage',
'type': 'raw',
'group_binds': [
'glance_api',
'cinder_api',
'cinder_volume',
'nova_compute',
'swift_proxy',
],
}
if 'mtu' in net_stg:
storage_network['container_mtu'] = net_stg.get('mtu')
networks.append({'network': storage_network})
if PRIVATE_COMPUTE_CLOUD in ref_arch_list:
vxlan_network = {
'container_bridge': br_tunnel,
'container_type': 'veth',
'container_interface': 'eth10',
'ip_from_q': 'tunnel',
'type': 'vxlan',
'range': '1:1000',
'net_name': 'vxlan',
'group_binds': [
'neutron_linuxbridge_agent',
]
}
if 'mtu' in net_tunnel:
vxlan_network['container_mtu'] = net_tunnel.get('mtu')
vlan_vlan_network = {
'container_bridge': br_vlan,
'container_type': 'veth',
'container_interface': 'eth11',
'type': 'vlan',
'range': '1:4094',
'net_name': 'vlan',
'group_binds': [
'neutron_linuxbridge_agent',
],
}
if 'mtu' in net_vlan:
vlan_vlan_network['container_mtu'] = net_vlan.get('mtu')
host_vlan_intf = net_vlan.get('bridge-port', 'eth12')
vlan_flat_network = {
'container_bridge': br_vlan,
'container_type': 'veth',
'container_interface': 'eth12',
'host_bind_override': host_vlan_intf,
'type': 'flat',
'net_name': 'external',
'group_binds': [
'neutron_linuxbridge_agent',
],
}
if 'mtu' in net_vlan:
vlan_flat_network['container_mtu'] = net_vlan.get('mtu')
networks.append({'network': vxlan_network})
networks.append({'network': vlan_vlan_network})
networks.append({'network': vlan_flat_network})
self.user_config['global_overrides']['provider_networks'] = networks
def _get_compute_hosts(self):
if PRIVATE_COMPUTE_CLOUD not in self.get_ref_arch():
return None
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return None
compute_list = self._get_nodes_for_role('compute')
return compute_list
def _configure_compute_hosts(self):
"""Configure the compute hosts."""
hosts = self._get_compute_hosts()
if not hosts:
return
# Compute Hosts
compute_hosts = {}
for compute in hosts:
hostname = compute.get('hostname', None)
if hostname:
compute_hosts[hostname] = {
'ip': compute.get('openstack-mgmt-addr', 'N/A')
}
self.user_config[COMPUTE_HOSTS] = compute_hosts
def _configure_storage_hosts(self):
"""Configure the storage hosts."""
if PRIVATE_COMPUTE_CLOUD not in self.get_ref_arch():
return
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return
controllers = self._get_controllers()
if not controllers:
return
# Storage Hosts (assuming ceph as cinder backend)
default_volume_hdd = {
'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
'rbd_pool': 'volumes',
'rbd_ceph_conf': '/etc/ceph/ceph.conf',
'rbd_flatten_volume_from_snapshot': False,
'rbd_max_clone_depth': 5,
'rbd_store_chunk_size': 4,
'rados_connect_timeout': -1,
'volume_backend_name': 'ceph',
'rbd_user': '{{ cinder_ceph_client }}',
'rbd_secret_uuid': '{{ cinder_ceph_client_uuid }}',
}
# Storage Hosts
storage_hosts = {}
for controller in controllers:
hostname = controller.get('hostname', None)
if hostname:
ceph_data = {
'ip': controller.get('openstack-mgmt-addr', 'N/A'),
'container_vars': {
'cinder_backends': {
'limit_container_types': 'cinder_volume',
'ceph': copy.deepcopy(default_volume_hdd),
}
}
}
storage_hosts[hostname] = ceph_data
self.user_config['storage_hosts'] = storage_hosts
return
def _configure_swift_general(self):
"""Configure general user variables for swift."""
# Find the storage network bridge name.
networks = self.gen_dict.get('networks', None)
if not networks:
return
stg_network = networks.get('openstack-stg', None)
if not stg_network:
return
bridge_name = stg_network.get('bridge', None)
if not bridge_name:
return
swift_rep_network = networks.get('swift-replication', None)
br_swift_repl = None
if swift_rep_network:
br_swift_repl = swift_rep_network.get('bridge', None)
# General swift vars
swift = {}
swift['storage_network'] = bridge_name
swift['part_power'] = 8
swift['mount_point'] = '/srv/node'
if br_swift_repl:
swift['repl_network'] = br_swift_repl
if 'global_overrides' not in self.user_config:
self.user_config['global_overrides'] = {}
self.user_config['global_overrides']['swift'] = swift
return
def _configure_swift_policies(self):
"""Configure storage_policies for swift."""
storage_policies = []
policy = {
'name': 'default',
'index': 0,
'default': 'True',
}
storage_policies.append({'policy': policy})
self.user_config['global_overrides']['swift']['storage_policies'] = (
storage_policies)
return
def _get_swift_proxy_hosts(self):
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return None
proxies = []
ref_arch_list = self.get_ref_arch()
if SWIFT in ref_arch_list:
proxies = self._get_nodes_for_role('swift-proxy')
# for backward compatibility we fall back to the controller
# nodes if we haven't found proxies yet and SWIFT_MINIMUM_HARDWARE
# is specified.
if not proxies and (SWIFT_MINIMUM_HARDWARE in ref_arch_list):
proxies = self._get_controllers()
return proxies
def _configure_swift_proxy_hosts(self):
"""Configure list of swift proxy hosts."""
swift_proxy_hosts = self._get_swift_proxy_hosts()
if swift_proxy_hosts is None:
return
proxy_hosts = {}
# Swift Proxy Hosts.
for proxy in swift_proxy_hosts:
hostname = proxy.get('hostname', None)
if hostname:
proxy_hosts[hostname] = {
'ip': proxy.get('openstack-mgmt-addr', 'N/A')
}
self.user_config[SWIFT_PROXY_HOSTS] = proxy_hosts
return
def _configure_swift_template(self, host, template_vars):
"""Grab values from the node-template for the given host_type."""
# Find the node-templates section.
node_templates = self.gen_dict.get('node-templates', None)
if not node_templates:
return
# The host_type is either swift-metadata or swift-object.
template = node_templates.get(host['template'], None)
if not template:
return
# Find the domain-settings section.
domain_settings = template.get('domain-settings', None)
if not domain_settings:
return
# Override the default zone_count if necessary.
zcount = domain_settings.get('zone-count', None)
if zcount:
template_vars['zone_count'] = zcount
# Override the default mount_point if necessary.
mpoint = domain_settings.get('mount-point', None)
if mpoint:
template_vars['mount_point'] = mpoint
return
def _configure_swift_host(self, host, zone, mount_point, swift_vars):
"""Configure a single swift_host.
This typically includes a list of drives specific to this host.
"""
domain_settings = host.get('domain-settings', None)
if not domain_settings:
return
# There are three different disk lists we need to check.
drive_types = (
'account-ring-disks',
'container-ring-disks',
'object-ring-disks')
name_to_drive = {}
for drive_type in drive_types:
ring_disks = domain_settings.get(drive_type, None)
if not ring_disks:
continue
for disk in ring_disks:
drive = name_to_drive.get(disk)
if not drive:
drive = {
'name': disk,
'groups': [],
}
name_to_drive[disk] = drive
if drive_type == 'object-ring-disks':
drive['groups'].append('default')
elif drive_type == 'account-ring-disks':
drive['groups'].append('account')
elif drive_type == 'container-ring-disks':
drive['groups'].append('container')
# This list of drives for this host will be inserted into swift_vars.
drives = []
for drive in sorted(name_to_drive.keys()):
drives.append(name_to_drive[drive])
swift_vars['zone'] = zone
swift_vars['drives'] = drives
# If the mount_point value was specified in the node-template,
# use it here. Otherwise, don't specify a node specific mount_point
# here. That way we default to the mount point generated by
# _configure_swift_general.
if mount_point:
swift_vars['mount_point'] = mount_point
return
def _get_swift_ring_hosts(self):
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return None
object_nodes = self._get_nodes_for_role('swift-object')
md_nodes = self._get_nodes_for_role('swift-metadata')
for node in md_nodes:
if node not in object_nodes:
object_nodes.append(node)
return object_nodes
def _configure_swift_hosts(self):
"""Configure list of swift_hosts.
This typically includes a list of drives specific to this host.
"""
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return
        def is_converged_metadata_object(meta, object_nodes):
            meta_copy = list(meta)
            object_copy = list(object_nodes)
            try:
                for node in meta_copy:
                    object_copy.remove(node)
            except ValueError:
                return False
            return not object_copy
object_nodes = self._get_nodes_for_role('swift-object')
md_nodes = self._get_nodes_for_role('swift-metadata')
# Here we simply see if the two node lists have the same elements.
# Better checking of ring device settings, etc is done in the
# validate config step which is called earlier.
converged = is_converged_metadata_object(md_nodes, object_nodes)
if converged:
swift_hosts = self._build_swift_hosts_section(object_nodes)
else:
# Separate metadata and object nodes.
# We build the swift host sections for each and merge them
# and add the merged dict to the config.
swift_hosts = self._build_swift_hosts_section(object_nodes)
mh = self._build_swift_hosts_section(md_nodes)
swift_hosts.update(mh)
# Avoid adding an empty value for the key 'swift_hosts' in the
# case where no swift metadata or swift object nodes exist
# in the inventory.
if swift_hosts:
self.user_config[SWIFT_HOSTS] = swift_hosts
def _build_swift_hosts_section(self, nodes):
# We automatically set the zone index for each host. This
# assumes the default 3x swift replication, so the zone
# values cycle between 0-2.
swift_hosts = {}
zone = 0
for host in nodes:
hostname = host.get('hostname', None)
if hostname:
# See if there are values for zone_count and mount_point
# specified in the node-templates.
template_vars = {}
self._configure_swift_template(host, template_vars)
zone_count = template_vars.get('zone_count', 3)
mount_point = template_vars.get('mount_point', None)
# Fill out the dictionary of swift_vars for
# this host (including the zone and drives list).
swift_vars = {}
self._configure_swift_host(host, zone, mount_point,
swift_vars)
swift_hosts[hostname] = {
'ip': host.get('openstack-mgmt-addr', 'N/A'),
'container_vars': {'swift_vars': swift_vars},
}
zone += 1
if zone % zone_count == 0:
zone = 0
return swift_hosts
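    # Zone assignment sketch: with the default zone_count of 3, four object
    # nodes would be placed in zones 0, 1, 2 and 0 respectively, matching the
    # assumed 3x swift replication.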
def _configure_swift(self):
"""Configure user variables for swift."""
ref_arch_list = self.get_ref_arch()
if SWIFT not in ref_arch_list:
return
self._configure_swift_general()
self._configure_swift_policies()
self._configure_swift_proxy_hosts()
self._configure_swift_hosts()
def get_ref_arch(self):
return self.gen_dict.get('reference-architecture', [])
def _do_configure_repo_hosts(self, hosts, hosts_type,
repo_hosts_archs_set):
"""Configure repo hosts of any other architecture if the
hosts of given host type are of different architecture
compared to the controller nodes.
"""
if not hosts:
return
for host in hosts:
hostname = host.get('hostname', None)
if hostname:
arch = host.get(ARCHITECTURE, X86_64)
if arch.lower().startswith(PPC64):
arch = PPC64LE
if arch not in repo_hosts_archs_set:
self.user_config[REPO_INFRA_HOSTS][hostname] = \
copy.deepcopy(self.user_config[hosts_type][hostname])
repo_hosts_archs_set.add(arch)
def _get_repo_hosts_archs_set(self):
controllers = self._get_controllers()
if not controllers:
return None
repo_hosts_archs = set()
for controller in controllers:
hostname = controller.get('hostname', None)
if hostname:
arch = controller.get(ARCHITECTURE, X86_64)
if arch.lower().startswith(PPC64):
arch = PPC64LE
repo_hosts_archs.add(arch)
return repo_hosts_archs
def _configure_extra_repo_hosts(self):
"""Configure repo hosts of any other architecture if the
hosts (compute nodes, storage nodes, etc) are of different
architecture compared to the controller nodes.
"""
# repo_hosts_archs is the set of architectures for which
# repo container will be created and repo will be built.
repo_hosts_archs = self._get_repo_hosts_archs_set()
if not repo_hosts_archs:
return
hosts = self._get_compute_hosts()
self._do_configure_repo_hosts(hosts, COMPUTE_HOSTS, repo_hosts_archs)
hosts = self._get_swift_proxy_hosts()
self._do_configure_repo_hosts(hosts, SWIFT_PROXY_HOSTS,
repo_hosts_archs)
hosts = self._get_swift_ring_hosts()
self._do_configure_repo_hosts(hosts, SWIFT_HOSTS, repo_hosts_archs)
def create_user_config(self):
"""Process the inventory input and generate the OSA user config."""
self._load_yml()
self._configure_cidr_networks()
self._configure_infra_hosts()
self._configure_global_overrides()
self._configure_compute_hosts()
self._configure_storage_hosts()
self._configure_swift()
self._configure_extra_repo_hosts()
self._dump_yml(self.user_config, OSA_USER_CFG_FILE)
def get_network_interface(self, net_details):
"""Find network interface from the given network details."""
port_keys = ['bond', 'eth-port']
ext_intf = None
for port_key in port_keys:
intf = net_details.get(port_key, None)
if intf:
ext_intf = intf
break
return ext_intf
def find_external_network(self, ext_floating_ip, networks):
"""Find external network based on the network address that matches
external-floating-ipaddr.
If not found, return the management network interface.
Returns: network name, network details
"""
if ext_floating_ip == 'N/A':
return None
for net, net_details in networks.iteritems():
if 'addr' in net_details:
# Check if matching
if (netaddr.IPAddress(ext_floating_ip) in
netaddr.IPSet([net_details['addr']])):
return net, net_details
return 'openstack-mgmt', networks.get('openstack-mgmt', None)
def find_external_interface(self, ext_floating_ip, networks):
"""Find external interface based on the network address that matches
external-floating-ipaddr.
If not found, return the management network interface.
"""
if ext_floating_ip == 'N/A':
return None
net, net_details = self.find_external_network(ext_floating_ip,
networks)
if net_details:
ext_intf = self.get_network_interface(net_details)
else:
mgmt_network = networks.get('openstack-mgmt', None)
ext_intf = mgmt_network.get('eth-port', None)
return ext_intf
def _get_external_vip_value(self):
external_vip = self.gen_dict.get('external-floating-ipaddr', 'N/A')
if external_vip != 'N/A' and '/' in external_vip:
# remove cidr prefix, if exists
external_vip = external_vip.split('/')[0]
return external_vip
def generate_haproxy(self):
"""Generate user variable file for HAProxy."""
external_vip = self._get_external_vip_value()
internal_vip = self.gen_dict.get('internal-floating-ipaddr', 'N/A')
networks = self.gen_dict.get('networks', None)
mgmt_network = networks.get('openstack-mgmt', None)
bridge = mgmt_network.get('bridge', None)
ext_intf = self.find_external_interface(external_vip, networks)
settings = {
'haproxy_keepalived_external_vip_cidr': external_vip,
'haproxy_keepalived_internal_vip_cidr': internal_vip,
'haproxy_keepalived_external_interface': ext_intf,
'haproxy_keepalived_internal_interface': bridge,
}
self._dump_yml(settings, OSA_USER_VAR_HAPROXY)
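    # A sketch of the generated user_var_haproxy.yml (values are hypothetical):
    #   haproxy_keepalived_external_vip_cidr: 9.114.192.10
    #   haproxy_keepalived_internal_vip_cidr: 172.29.236.10
    #   haproxy_keepalived_external_interface: bond0
    #   haproxy_keepalived_internal_interface: br-mgmt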
def generate_ceilometer(self):
"""Generate user variable file for ceilometer."""
settings = {
'swift_ceilometer_enabled': False,
'nova_ceilometer_enabled': False,
}
self._dump_yml(settings, OSA_USER_VAR_CEILOMETER)
def generate_rabbitmq(self):
"""Generate user variable file for rabbitmq."""
settings = {
# Disable rabbitmq management plugin by default
'rabbitmq_plugins': [{
'name': 'rabbitmq_management', 'state': 'disabled'
}]
}
self._dump_yml(settings, OSA_USER_VAR_RABBITMQ)
def generate_ceph(self):
"""Generate user variable file for ceph."""
if PRIVATE_COMPUTE_CLOUD not in self.get_ref_arch():
return
nodes = self.gen_dict.get('nodes', None)
if not nodes:
return
mons = self._get_ceph_monitors()
if not mons:
return
monitors = []
for c in mons:
monitors.append(c.get('openstack-stg-addr', 'N/A'))
settings = {
'ceph_pkg_source': 'uca',
'glance_default_store': 'rbd',
'glance_rbd_store_pool': 'images',
'nova_libvirt_images_rbd_pool': 'vms',
'ceph_mons': monitors,
}
self._dump_yml(settings, OSA_USER_VAR_CEPH)
def _get_ceph_monitors(self):
# Get ceph monitors by template name
nodes = self.gen_dict.get('nodes', {})
mons = []
mons.extend(nodes.get('ceph-monitor', []))
# Get ceph monitors by role
role_mons = self._get_nodes_through_roles('ceph-monitor')
for mon in role_mons:
if mon not in mons:
mons.append(mon)
if mons:
return mons
# If no nodes were found using the ceph-monitor role, we
# return the controller nodes for backward compatibility with
# configurations that assumed the ceph monitors were controllers.
# The implication here is that if node templates are using the
# ceph-monitor role then the 'controllers' node template will not be
# automatically added as a monitor node.
return self._get_controllers()
def define_no_proxy(self, env_vars_dict):
""" Define the variable 'no_proxy' if not already defined and
append the required addresses to it if missing. """
internal_lb_addr = self.user_config['global_overrides'][
'internal_lb_vip_address']
external_lb_addr = self.user_config['global_overrides'][
'external_lb_vip_address']
required_addresses = ["localhost", "127.0.0.1",
internal_lb_addr, external_lb_addr]
no_proxy_value = env_vars_dict.get('no_proxy')
if not no_proxy_value:
env_vars_dict['no_proxy'] = ",".join(required_addresses)
else:
addresses = no_proxy_value.split(",")
changed = False
for addr in required_addresses:
if addr not in addresses:
addresses.append(addr)
changed = True
if changed:
env_vars_dict['no_proxy'] = ','.join(addresses)
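    # Illustrative behaviour (hypothetical values): given no_proxy='localhost'
    # and load balancer VIPs 172.29.236.10 (internal) and 9.114.192.10
    # (external), the variable becomes
    #   no_proxy: localhost,127.0.0.1,172.29.236.10,9.114.192.10
    # so proxied deployments do not try to reach the VIPs through the proxy.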
def generate_deployment_env_vars(self):
"""Generate user variable file for deployment environment variables."""
env_vars_dict = self.gen_dict.get('deployment-environment')
if env_vars_dict:
if 'http_proxy' in env_vars_dict or 'https_proxy' in env_vars_dict:
# Make sure no_proxy is defined and has the required
# addresses
self.define_no_proxy(env_vars_dict)
settings = {
'deployment_environment_variables': env_vars_dict
}
self._dump_yml(settings, OSA_USER_VAR_DEPLOY_ENV)
def process_inventory(inv_name, output_dir):
"""Process the input inventory file.
:param inv_name: The path name of the input genesis inventory.
:param output_dir: The name of path for the generated files.
"""
generator = OSAFileGenerator(inv_name, output_dir)
generator._load_yml()
if 'reference-architecture' not in generator.gen_dict:
print "The inventory file is missing the reference-architecture."
sys.exit(1)
generator.create_user_config()
generator.generate_haproxy()
generator.generate_ceilometer()
generator.generate_ceph()
generator.generate_deployment_env_vars()
def parse_command():
"""Parse the command arguments for generate user config."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=('A command to generate the ansible user configuration'
' based on the Genesis inventory YAML file.'))
parser.add_argument('-i', '--input-file', required=True,
help=('Path to the Genesis inventory YAML file'))
parser.add_argument('-d', '--output-dir', default='.',
help=('Path to the OpenStack user config file to '
'be generated'))
parser.set_defaults(func=process_inventory)
return parser
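# Typical invocation (the script name is illustrative; -i and -d are the
# options defined above):
#   python generate_user_config.py -i inventory.yml -d /etc/openstack_deploy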
def signal_handler(signal, frame):
"""Signal handler to for processing, e.g. keyboard interrupt signals."""
sys.exit(0)
def main():
"""Main function."""
parser = parse_command()
args = parser.parse_args()
signal.signal(signal.SIGINT, signal_handler)
if (len(sys.argv) < 1):
parser.print_help()
sys.exit(1)
args.func(args.input_file, args.output_dir)
return 0
if __name__ == "__main__":
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Distribute Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import json
import os
import sys
import threading
import time
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_manager
CHIEF = distribute_coordinator._TaskType.CHIEF
WORKER = distribute_coordinator._TaskType.WORKER
PS = distribute_coordinator._TaskType.PS
EVALUATOR = distribute_coordinator._TaskType.EVALUATOR
STANDALONE_CLIENT = distribute_coordinator.CoordinatorMode.STANDALONE_CLIENT
INDEPENDENT_WORKER = distribute_coordinator.CoordinatorMode.INDEPENDENT_WORKER
NUM_WORKERS = 3
NUM_PS = 2
original_sys_exit = sys.exit
def _bytes_to_str(maybe_bytes):
if isinstance(maybe_bytes, six.string_types):
return maybe_bytes
else:
return str(maybe_bytes, "utf-8")
def _strip_protocol(target):
# cluster_spec expects "host:port" strings.
if "//" in target:
return target.split("//")[1]
else:
return target
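# For example, _strip_protocol("grpc://localhost:2222") returns
# "localhost:2222", while a target without a protocol prefix is returned
# unchanged.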
class MockExtended(object):
def __init__(self,
between_graph=False,
should_init=None,
should_checkpoint=None,
should_save_summary=None):
self.experimental_between_graph = between_graph
self.experimental_should_init = should_init
self.should_checkpoint = should_checkpoint
self.should_save_summary = should_save_summary
class MockStrategy(object):
def __init__(self,
between_graph=False,
should_init=None,
should_checkpoint=None,
should_save_summary=None):
self.extended = MockExtended(between_graph, should_init, should_checkpoint,
should_save_summary)
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
if self.extended.experimental_should_init is None:
if task_id == 0:
self.extended.experimental_should_init = True
else:
self.extended.experimental_should_init = False
if self.extended.should_checkpoint is None:
if task_id == 0:
self.extended.should_checkpoint = True
else:
self.extended.should_checkpoint = False
if self.extended.should_save_summary is None:
if task_id == 0:
self.extended.should_save_summary = True
else:
self.extended.should_save_summary = False
if session_config:
if (cluster_spec and task_type and task_id is not None and
self.extended.experimental_between_graph):
session_config.intra_op_parallelism_threads += 1
if task_type in ["chief", "worker"]:
session_config.device_filters.extend(
["/job:%s/task:%d" % (task_type, task_id), "/job:ps"])
else:
session_config.inter_op_parallelism_threads += 1
session_config.device_filters.append("/job:somejob")
class MockServer(object):
def __init__(self):
self._joined = False
self._started = False
def start(self):
self._started = True
def join(self):
assert not self._joined
self._joined = True
@property
def joined(self):
return self._joined
@property
def started(self):
return self._started
class DistributeCoordinatorTestBase(test.TestCase):
@classmethod
def setUpClass(cls):
# We have to create a global in-process cluster because once an in-process
# tensorflow server is created, there is no way to terminate it. Please see
# multi_worker_test_base.py for more details.
    # TODO(yuefengz): use the utility from multi_worker_test_base.
cls._workers, cls._ps = test_util.create_local_cluster(
NUM_WORKERS, num_ps=NUM_PS)
cls._cluster_spec = {
WORKER: [
_strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
],
PS: [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps]
}
def setUp(self):
self._result_correct = 0
self._lock = threading.Lock()
self._worker_context = {}
self._strategy_property = {}
self._std_servers = {}
self._barrier = distribute_coordinator._Barrier(NUM_WORKERS)
@contextlib.contextmanager
def _test_session(self, target):
config = config_pb2.ConfigProto(allow_soft_placement=True)
config.graph_options.optimizer_options.opt_level = -1
with session.Session(graph=None, config=config, target=target) as sess:
yield sess
  # TODO(yuefengz): use the utility from multi_worker_test_base.
def _create_cluster_spec(self,
has_chief=False,
num_workers=1,
num_ps=0,
has_eval=False):
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
cluster_spec = {}
if has_chief:
cluster_spec[CHIEF] = ["localhost:%s" % portpicker.pick_unused_port()]
if num_workers:
cluster_spec[WORKER] = [
"localhost:%s" % portpicker.pick_unused_port()
for _ in range(num_workers)
]
if num_ps:
cluster_spec[PS] = [
"localhost:%s" % portpicker.pick_unused_port() for _ in range(num_ps)
]
if has_eval:
cluster_spec[EVALUATOR] = ["localhost:%s" % portpicker.pick_unused_port()]
return cluster_spec
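  # A sketch of the dict returned for has_chief=True, num_workers=2, num_ps=1
  # (port numbers are picked at runtime and therefore hypothetical):
  #   {CHIEF: ["localhost:12345"],
  #    WORKER: ["localhost:12346", "localhost:12347"],
  #    PS: ["localhost:12348"]}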
def _in_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
xs = []
expected = 0.0
for i in range(context.num_workers):
with ops.device("/job:worker/task:%d" % i):
x = variable_scope.get_variable("x_%d" % i, initializer=10.0)
x_add = x.assign_add(float(i))
xs.append(x_add)
expected += i + 10.0
with ops.device("/job:worker/task:0"):
result = math_ops.add_n(xs)
variables.global_variables_initializer().run()
result_value = sess.run(result)
self.assertEqual(result_value, expected)
if result_value == expected:
self._result_correct += 1
def _run_coordinator_in_thread(self, worker_fn, strategy, **kwargs):
t = threading.Thread(
target=distribute_coordinator.run_distribute_coordinator,
args=(worker_fn, strategy),
kwargs=kwargs)
t.start()
return t
def _run_multiple_coordinator_in_threads(self, worker_fn, strategy,
cluster_spec, **kwargs):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_coordinator_in_thread(
worker_fn,
strategy,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
**kwargs)
threads[task_type].append(t)
return threads
def _between_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using resource variable will make
# the test flaky.
x = variable_scope.get_variable(
"x", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable(
"y", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
if context.is_chief:
variables.global_variables_initializer().run()
      # Synchronize workers after initialization.
if context.has_barrier:
context.wait_for_other_workers()
else:
while True:
uninit_vars = sess.run(variables.report_uninitialized_variables())
# pylint: disable=g-explicit-length-test
if len(uninit_vars) == 0:
break
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _between_graph_with_monitored_session(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using resource variable will make
# the test flaky.
x = variable_scope.get_variable("xx", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable("yy", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
# The monitored session will run init or ready ops.
with monitored_session.MonitoredSession() as sess:
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _dump_worker_context(self, strategy):
"""Dumps the propoerties of each worker context.
It dumps the context properties to a dict mapping from task_type to a list
of tuples of master_target, num_workers, is_chief and distribute_mode, where
the list is indexed by the task_id.
Args:
strategy: a `DistributionStrategy` object.
"""
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._worker_context:
self._worker_context[task_type] = []
while len(self._worker_context[task_type]) <= task_id:
self._worker_context[task_type].append(None)
self._worker_context[task_type][task_id] = (context.master_target,
context.num_workers,
context.is_chief,
context.distributed_mode)
def _dump_strategy_property(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
self.assertEqual(context._strategy.extended.experimental_should_init,
strategy.extended.experimental_should_init)
self.assertEqual(context.should_checkpoint,
strategy.extended.should_checkpoint)
self.assertEqual(context.should_save_summary,
strategy.extended.should_save_summary)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._strategy_property:
self._strategy_property[task_type] = []
while len(self._strategy_property[task_type]) <= task_id:
self._strategy_property[task_type].append(None)
self._strategy_property[task_type][task_id] = (
context._strategy.extended.experimental_should_init,
context.should_checkpoint,
context.should_save_summary)
def _run_mock_std_server(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
task_type = str(task_type)
task_id = task_id or 0
with self._lock:
if task_type not in self._std_servers:
self._std_servers[task_type] = []
while len(self._std_servers[task_type]) <= task_id:
self._std_servers[task_type].append(None)
server = MockServer()
self._std_servers[task_type][task_id] = server
return server
class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase):
def testInGraphStandaloneMode(self):
"""Test it runs in-graph replication in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._in_graph_worker_fn,
MockStrategy(between_graph=False),
cluster_spec=self._cluster_spec)
self.assertEqual(self._result_correct, 1)
def testBetweenGraph(self):
"""Test it runs between-graph replication in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._between_graph_worker_fn,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphWithMonitoredSession(self):
"""Test monitored session in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._between_graph_with_monitored_session,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
    # There is only one type of task and there are three such tasks.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(self._workers[1].target), NUM_WORKERS, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(self._workers[2].target), NUM_WORKERS, False, True))
def testBetweenGraphStrategyProperties(self):
# Dumps properties of the strategy objects.
distribute_coordinator.run_distribute_coordinator(
self._dump_strategy_property,
MockStrategy(between_graph=True, should_init=True),
cluster_spec=self._cluster_spec)
    # There is only one type of task and there are three such tasks.
self.assertEqual(len(self._strategy_property), 1)
self.assertTrue(WORKER in self._strategy_property)
self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
# Check whether each task has the right properties of should_init,
# should_checkpoint and should_save_summary.
self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
def testInGraphContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=self._cluster_spec)
# There is only a "None" task in the dumped task context.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context["None"][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
def testLocalContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=None)
# There is only a "None" task.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0], ("", 0, True, False))
def testBetweenGraphContextWithChief(self):
# Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
cluster_spec = copy.deepcopy(self._cluster_spec)
cluster_spec[CHIEF] = ["fake_chief"]
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec=cluster_spec,
rpc_layer="grpc")
# There are one CHIEF and three workers.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue(CHIEF in self._worker_context)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[CHIEF]), 1)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context[CHIEF][0],
("grpc://fake_chief", 4, True, True))
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))
def testInGraphContextWithEval(self):
# Adds a EVALUATOR job.
cluster_spec = copy.deepcopy(self._cluster_spec)
cluster_spec[EVALUATOR] = ["fake_evaluator"]
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=cluster_spec,
rpc_layer=None)
# There are one "None" task and one EVALUATOR task.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue("None" in self._worker_context)
self.assertTrue(EVALUATOR in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
_bytes_to_str(self._workers[0].target)), 3, True, True))
self.assertEqual(self._worker_context[EVALUATOR][0],
("fake_evaluator", 3, True, False))
class DistributeCoordinatorTestIndependentWorkerMode(
    DistributeCoordinatorTestBase):
def testInGraph(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
threads = self._run_multiple_coordinator_in_threads(
self._in_graph_worker_fn,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER)
threads[WORKER][0].join()
self.assertEqual(self._result_correct, 1)
def testBetweenGraph(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)
threads = self._run_multiple_coordinator_in_threads(
self._between_graph_worker_fn,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphWithMonitoredSession(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)
threads = self._run_multiple_coordinator_in_threads(
self._between_graph_with_monitored_session,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphContext(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only one type of task and three such tasks.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(cluster_spec[WORKER][1]), NUM_WORKERS, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(cluster_spec[WORKER][2]), NUM_WORKERS, False, True))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 1)
self.assertTrue(WORKER in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertFalse(self._std_servers[WORKER][1].joined)
self.assertFalse(self._std_servers[WORKER][2].joined)
def testBetweenGraphStrategyProperties(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps properties of the strategy objects.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_strategy_property,
MockStrategy(between_graph=True, should_init=True),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
    # There is only one type of task and there are three such tasks.
self.assertEqual(len(self._strategy_property), 1)
self.assertTrue(WORKER in self._strategy_property)
self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
# Check whether each task has the right properties of should_init,
# should_checkpoint and should_save_summary.
self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
def testInGraphContext(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only a "None" task in the dumped task context.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context["None"][0],
(_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 1)
self.assertTrue(WORKER in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertTrue(self._std_servers[WORKER][1].joined)
self.assertTrue(self._std_servers[WORKER][2].joined)
def testInGraphContextWithEval(self):
    # Adds an EVALUATOR job.
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, has_eval=True)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
threads[EVALUATOR][0].join()
# There are one "None" task and one EVALUATOR task.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue("None" in self._worker_context)
self.assertTrue(EVALUATOR in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0],
(_bytes_to_str(cluster_spec[WORKER][0]), 3, True, True))
self.assertEqual(self._worker_context[EVALUATOR][0],
(cluster_spec[EVALUATOR][0], 3, True, False))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 2)
self.assertTrue(WORKER in self._std_servers)
self.assertTrue(EVALUATOR in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertEqual(len(self._std_servers[EVALUATOR]), 1)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertTrue(self._std_servers[WORKER][1].joined)
self.assertTrue(self._std_servers[WORKER][2].joined)
self.assertFalse(self._std_servers[EVALUATOR][0].joined)
def testRunStdServerInGoogleEnvironment(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["localhost:0"]}
tf_config = {"cluster": cluster_spec, "environment": "google"}
joined = [False]
def _fake_sleep(_):
joined[0] = True
original_sys_exit(0)
def _thread_fn(cluster_spec):
distribute_coordinator.run_distribute_coordinator(
None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="ps",
task_id=0)
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
time, "sleep", _fake_sleep):
t = threading.Thread(target=_thread_fn, args=(cluster_spec,))
t.start()
t.join()
self.assertTrue(joined[0])
def testRpcLayerEnvironmentVariable(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}
rpc_layer_from_coordinator = [None]
def _run_mock_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None,
environment=None):
del cluster_spec, task_type, task_id, session_config, environment
rpc_layer_from_coordinator[0] = rpc_layer
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _run_mock_server):
distribute_coordinator.run_distribute_coordinator(
None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="ps",
task_id=0)
self.assertEqual(rpc_layer_from_coordinator[0], "cake")
class StrategyConfigureTest(test.TestCase):
def setUp(self):
self._device_filters = []
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
super(StrategyConfigureTest, self).setUp()
def _dump_device_filters(self, *args, **kwargs):
session_config = kwargs.get("session_config", None)
self._device_filters.extend(session_config.device_filters)
self._intra_op_parallelism_threads = (
session_config.intra_op_parallelism_threads)
self._inter_op_parallelism_threads = (
session_config.inter_op_parallelism_threads)
return MockServer()
def _worker_fn(self, strategy):
worker_context = distribute_coordinator_context.get_current_worker_context()
session_config = worker_context._session_config
self._device_filters.extend(session_config.device_filters)
self._intra_op_parallelism_threads = (
session_config.intra_op_parallelism_threads)
self._inter_op_parallelism_threads = (
session_config.inter_op_parallelism_threads)
return MockServer()
def test_session_config_in_std_server(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server",
self._dump_device_filters):
distribute_coordinator.run_distribute_coordinator(
lambda _: None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="worker",
task_id=0)
self.assertEqual(self._intra_op_parallelism_threads, 1)
self.assertEqual(self._inter_op_parallelism_threads, 0)
def test_session_config_in_session_creator(self):
cluster_spec = {"worker": ["localhost:0"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
distribute_coordinator.run_distribute_coordinator(
self._worker_fn,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="worker",
task_id=0)
self.assertEqual(self._device_filters, ["/job:worker/task:0", "/job:ps"])
self.assertEqual(self._intra_op_parallelism_threads, 2)
self.assertEqual(self._inter_op_parallelism_threads, 0)
def test_eval_strategy_configure(self):
cluster_spec = {"evaluator": ["localhost:0"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
distribute_coordinator.run_distribute_coordinator(
lambda _: None,
MockStrategy(between_graph=False),
eval_fn=self._worker_fn,
eval_strategy=MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="evaluator",
task_id=0)
self.assertEqual(self._device_filters, ["/job:somejob"])
self.assertEqual(self._intra_op_parallelism_threads, 0)
self.assertEqual(self._inter_op_parallelism_threads, 2)
class RunStandardTensorflowServerTest(test.TestCase):
def test_std_server_arguments(self):
cs = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cs, "task": {"type": "ps", "id": 0}}
def _mock_run_std_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None):
self.assertEqual(cluster_spec.as_dict(), cs)
self.assertEqual(task_type, "ps")
self.assertEqual(task_id, 0)
self.assertEqual(session_config.experimental.collective_group_leader,
"/job:worker/replica:0/task:0")
self.assertEqual(session_config.intra_op_parallelism_threads, 1)
self.assertEqual(rpc_layer, "grpc")
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _mock_run_std_server):
session_config = config_pb2.ConfigProto()
session_config.intra_op_parallelism_threads = 1
mock_server = distribute_coordinator.run_standard_tensorflow_server(
session_config)
self.assertTrue(mock_server.started)
if __name__ == "__main__":
  # TODO(yuefengz): find a smart way to terminate std server threads.
with test.mock.patch.object(sys, "exit", os._exit):
# Reduce `recovery_wait_secs` from 30 seconds so the test completes quickly.
orig_init = session_manager.SessionManager.__init__
def new_init(*args, **kwargs):
kwargs.pop("recovery_wait_secs", None)
kwargs["recovery_wait_secs"] = 0.5
orig_init(*args, **kwargs)
session_manager.SessionManager.__init__ = new_init
test.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from base64 import b64encode, b64decode
import json
import sys
from .TProtocol import TProtocolBase, TProtocolException
from thrift.Thrift import TType
JSON_OBJECT_START = b'{'
JSON_OBJECT_END = b'}'
JSON_ARRAY_START = b'['
JSON_ARRAY_END = b']'
JSON_NEW_LINE = b'\n'
JSON_PAIR_SEPARATOR = b':'
JSON_ELEM_SEPARATOR = b','
JSON_BACKSLASH = b'\\'
JSON_STRING_DELIMITER = b'"'
JSON_ZERO_CHAR = b'0'
JSON_TAB = b" "
JSON_CARRIAGE_RETURN = b'\r'
JSON_SPACE = b' '
TAB = b'\t'
JSON_ESCAPE_CHAR = b'u'
JSON_ESCAPE_PREFIX = b"\\u00"
THRIFT_VERSION_1 = 1
THRIFT_NAN = b"NaN"
THRIFT_INFINITY = b"Infinity"
THRIFT_NEGATIVE_INFINITY = b"-Infinity"
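# Interpretation of JSON_CHAR_TABLE (see writeJSONChar below): an entry of 0
# means the character must be emitted as a \u00XX escape, 1 means it is written
# verbatim, and a bytes entry such as b'n' is the short backslash escape to use.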
JSON_CHAR_TABLE = [ \
# 0 1 2 3 4 5 6 7 8 9 A B C D E F
0, 0, 0, 0, 0, 0, 0, 0,b'b',b't',b'n', 0,b'f',b'r', 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
1, 1,b'"', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
]
ESCAPE_CHARS = b"\"\\bfnrt"
ESCAPE_CHAR_VALS = [b'"', b'\\', b'\b', b'\f', b'\n', b'\r', b'\t']
NUMERIC_CHAR = b'+-.0123456789Ee'
def hexChar(x):
x &= 0x0f
return hex(x)[2:]
def hexVal(ch):
    # Convert a single lowercase hex digit ('0'-'9', 'a'-'f') to its value.
    if '0' <= ch <= '9':
        return ord(ch) - ord('0')
    elif 'a' <= ch <= 'f':
        return ord(ch) - ord('a') + 10
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unexpected hex value")
class TJSONContext:
def __init__(self, protocol, indentLevel=0):
self.indentLevel = indentLevel
self.protocol = protocol
def write(self, trans):
return
def read(self, reader):
return
def escapeNum(self):
return False
def writeNewLine(self, trans):
trans.write(JSON_NEW_LINE)
self.indent(trans)
def indent(self, trans):
for i in range(self.indentLevel):
trans.write(JSON_TAB)
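# TJSONPairContext alternates between the ':' that separates a key from its
# value and the ',' that separates one pair from the next. escapeNum() is True
# on the key side, so numeric keys are written (and expected) inside quotes.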
class TJSONPairContext(TJSONContext):
def __init__(self, protocol, indentLevel=0, isMapPair=False):
TJSONContext.__init__(self, protocol, indentLevel)
self.first = True
self.colon = True
self.isMapPair = isMapPair
self.skipColon = False
def write(self, trans):
if self.first:
self.first = False
self.colon = True
else:
if self.colon:
trans.write(JSON_PAIR_SEPARATOR + b" ")
else:
trans.write(JSON_ELEM_SEPARATOR)
if self.isMapPair:
self.writeNewLine(trans)
self.colon = not self.colon
def read(self, reader):
if self.first:
self.first = False
self.colon = True
else:
self.protocol.skipWhitespace()
if self.colon:
if self.skipColon:
self.skipColon = False
else:
self.protocol.readJSONSyntaxChar(JSON_PAIR_SEPARATOR)
else:
self.protocol.readJSONSyntaxChar(JSON_ELEM_SEPARATOR)
self.colon = not self.colon
def escapeNum(self):
return self.colon
class TJSONListContext(TJSONContext):
def __init__(self, protocol, indentLevel=0):
TJSONContext.__init__(self, protocol, indentLevel)
self.first = True
def read(self, reader):
if self.first:
self.first = False
else:
self.protocol.skipWhitespace()
self.protocol.readJSONSyntaxChar(JSON_ELEM_SEPARATOR)
def write(self, trans):
if self.first:
self.first = False
else:
trans.write(JSON_ELEM_SEPARATOR)
self.writeNewLine(trans)
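# LookaheadReader provides one character of lookahead over the transport:
# peek() reads and buffers a single byte without consuming it, and the next
# read() returns that buffered byte instead of touching the transport again.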
class LookaheadReader():
def __init__(self, protocol):
self.protocol = protocol
self.hasData = False
self.data = b''
def read(self):
if self.hasData is True:
self.hasData = False
else:
self.data = self.protocol.trans.read(1)
return self.data
def peek(self):
if self.hasData is False:
self.data = self.protocol.trans.read(1)
self.hasData = True
return self.data
class ThriftSpec():
def __init__(self, spec):
self.spec = spec
self.nextSpec = None
class StructSpec(ThriftSpec):
'''
Wraps thrift_spec of a thrift struct.
'''
def readFieldBegin(self, fname, guess_func):
field_spec = None
self.nextSpec = None
if sys.version_info[0] >= 3:
fname = fname.decode()
for s in self.spec:
if s is not None and s[2] == fname:
field_spec = s
break
if field_spec is not None:
if field_spec[1] == TType.STRUCT:
self.nextSpec = StructSpec(field_spec[3][1])
elif field_spec[1] in (TType.SET, TType.LIST):
self.nextSpec = ListOrSetSpec(field_spec[3])
elif field_spec[1] == TType.MAP:
self.nextSpec = MapSpec(field_spec[3])
return (fname, field_spec[1], field_spec[0])
else:
return (fname, guess_func(), 0)
def getNextSpec(self):
return self.nextSpec
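# Illustrative note (an assumption about Thrift-generated code, not taken from
# this module): the spec wrapped by StructSpec is the struct's thrift_spec,
# whose entries look roughly like (field_id, TType.X, 'field_name', nested_spec,
# default). readFieldBegin matches the JSON key against entry[2] and falls back
# to guess_func() for keys that are not declared in the spec.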
class ListOrSetSpec(ThriftSpec):
'''Wraps a list or set's 2-tuple nested type spec.
getNextSpec is called in readListBegin to *prepare* the spec of
the list element which may/may not be used depending on whether
the list is empty.
For example, to read list<SomeStruct> the following methods will
be called:
readListBegin()
readStructBegin()
readStructEnd()
...
readListEnd()
After readListBegin is called the current spec is still
ListOrSetSpec and its nextSpec is prepared for its element.
readStructBegin/End will push/pop the element's StructSpec
whenever a SomeStruct is read.
-1 tells the generated code that the size of this list is
undetermined so it needs to use peekList to detect the end of
the list.
'''
def readListBegin(self):
self.getNextSpec()
return (self.spec[0], -1)
readSetBegin = readListBegin
def getNextSpec(self):
if self.nextSpec is None:
if self.spec[0] == TType.STRUCT:
self.nextSpec = StructSpec(self.spec[1][1])
elif self.spec[0] in (TType.LIST, TType.SET):
self.nextSpec = ListOrSetSpec(self.spec[1])
elif self.spec[0] == TType.MAP:
self.nextSpec = MapSpec(self.spec[1])
return self.nextSpec
class MapSpec(ThriftSpec):
    '''Wraps a map's 4-tuple key/value type spec:
    (key_type, key_spec_args, value_type, value_spec_args).
    '''
def __init__(self, spec):
ThriftSpec.__init__(self, spec)
self.key = True
self.keySpec = None
if self.spec[1] is not None:
if self.spec[0] == TType.STRUCT:
self.keySpec = StructSpec(self.spec[1][1])
elif self.spec[0] in (TType.LIST, TType.SET):
self.keySpec = ListOrSetSpec(self.spec[1])
elif self.spec[0] == TType.MAP:
self.keySpec = MapSpec(self.spec[1])
self.valueSpec = None
if self.spec[3] is not None:
if self.spec[2] == TType.STRUCT:
self.valueSpec = StructSpec(self.spec[3][1])
elif self.spec[2] in (TType.LIST, TType.SET):
self.valueSpec = ListOrSetSpec(self.spec[3])
elif self.spec[2] == TType.MAP:
self.valueSpec = MapSpec(self.spec[3])
def readMapBegin(self):
self.getNextSpec()
return (self.spec[0], self.spec[2], -1)
def getNextSpec(self):
if self.keySpec is not None and self.valueSpec is not None:
self.nextSpec = self.keySpec if self.key is True else \
self.valueSpec
self.key = not self.key
else:
self.nextSpec = self.keySpec if self.keySpec is not None else \
self.valueSpec
return self.nextSpec
class TSimpleJSONProtocolBase(TProtocolBase):
def __init__(self, trans, spec=None):
TProtocolBase.__init__(self, trans)
# Used as stack for contexts.
self.contexts = [TJSONContext(protocol=self)]
self.context = TJSONContext(protocol=self)
self.reader = LookaheadReader(self)
self.specs = []
self.spec = StructSpec(spec)
def pushContext(self, newContext):
self.contexts.append(self.context)
self.context = newContext
def popContext(self):
if len(self.contexts) > 0:
self.context = self.contexts.pop()
def pushSpec(self, newSpec):
self.specs.append(self.spec)
self.spec = newSpec
def popSpec(self):
if len(self.specs) > 0:
self.spec = self.specs.pop()
def skipWhitespace(self):
skipped = 0
while True:
ch = self.reader.peek()
if not ch in (JSON_NEW_LINE,
TAB,
JSON_CARRIAGE_RETURN,
JSON_SPACE):
break
self.reader.read()
skipped += 1
return skipped
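    # guessTypeIdFromFirstByte is the guess_func handed to
    # StructSpec.readFieldBegin for fields absent from the thrift_spec: the
    # first byte of the JSON value is enough to choose STRUCT, LIST, STRING or
    # BOOL (or STOP at a closing brace/bracket), and bare numbers are reported
    # as DOUBLE because SimpleJSON does not preserve integer widths.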
def guessTypeIdFromFirstByte(self):
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_PAIR_SEPARATOR)
self.context.skipColon = True
self.skipWhitespace()
byte = self.reader.peek()
if byte == JSON_OBJECT_END or byte == JSON_ARRAY_END:
return TType.STOP
elif byte == JSON_STRING_DELIMITER:
return TType.STRING
elif byte == JSON_OBJECT_START:
return TType.STRUCT
elif byte == JSON_ARRAY_START:
return TType.LIST
elif byte == b't' or byte == b'f':
return TType.BOOL
elif byte in (b'+', b'-', b'0', b'1', b'2', b'3', b'4', b'5',
b'6', b'7', b'8', b'9'):
return TType.DOUBLE
else:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unrecognized byte: {}".format(byte))
def writeJSONEscapeChar(self, ch):
self.trans.write(JSON_ESCAPE_PREFIX)
self.trans.write(hexChar(ch >> 4))
self.trans.write(hexChar(ch))
def writeJSONChar(self, ch):
charValue = ord(ch) if not isinstance(ch, int) else ch
ch = chr(ch) if isinstance(ch, int) else ch
if charValue >= 0x30:
if ch == JSON_BACKSLASH: # Only special character >= 0x30 is '\'.
self.trans.write(JSON_BACKSLASH)
self.trans.write(JSON_BACKSLASH)
else:
self.trans.write(ch)
else:
outCh = JSON_CHAR_TABLE[charValue]
if outCh == 1:
self.trans.write(ch)
elif outCh:
self.trans.write(JSON_BACKSLASH)
self.trans.write(outCh)
else:
self.writeJSONEscapeChar(charValue)
def writeJSONString(self, outStr):
self.context.write(self.trans)
self.trans.write(JSON_STRING_DELIMITER)
is_bytes_type = isinstance(outStr, bytes)
for i in range(len(outStr)):
# Slicing of bytes in Py3 produces bytes!
ch = outStr[i:(i + 1)] if is_bytes_type else outStr[i]
self.writeJSONChar(ch)
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONBase64(self, outStr):
self.context.write(self.trans)
self.trans.write(JSON_STRING_DELIMITER)
b64Str = b64encode(outStr)
self.trans.write(b64Str)
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONInteger(self, num):
self.context.write(self.trans)
escapeNum = self.context.escapeNum()
numStr = str(num)
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
self.trans.write(numStr)
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONBool(self, boolVal):
self.context.write(self.trans)
if self.context.escapeNum():
self.trans.write(JSON_STRING_DELIMITER)
if boolVal:
self.trans.write(b"true")
else:
self.trans.write(b"false")
if self.context.escapeNum():
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONDouble(self, num):
self.context.write(self.trans)
numStr = str(num)
special = False
if numStr == "nan":
numStr = THRIFT_NAN
special = True
elif numStr == "inf":
numStr = THRIFT_INFINITY
special = True
elif numStr == "-inf":
numStr = THRIFT_NEGATIVE_INFINITY
special = True
escapeNum = special or self.context.escapeNum()
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
self.trans.write(numStr)
if escapeNum:
self.trans.write(JSON_STRING_DELIMITER)
def writeJSONObjectStart(self):
self.context.write(self.trans)
self.trans.write(JSON_OBJECT_START)
self.pushContext(TJSONPairContext(protocol=self,
indentLevel=len(self.contexts)))
def writeJSONObjectEnd(self):
self.popContext()
self.context.writeNewLine(self.trans)
self.trans.write(JSON_OBJECT_END)
def writeJSONArrayStart(self):
self.context.write(self.trans)
self.trans.write(JSON_ARRAY_START)
self.pushContext(TJSONListContext(protocol=self,
indentLevel=len(self.contexts)))
def writeJSONArrayEnd(self):
self.popContext()
self.context.writeNewLine(self.trans)
self.trans.write(JSON_ARRAY_END)
def writeJSONMapStart(self):
self.context.write(self.trans)
self.trans.write(JSON_OBJECT_START)
self.pushContext(TJSONListContext(protocol=self,
indentLevel=len(self.contexts)))
def writeJSONMapEnd(self):
self.popContext()
self.context.writeNewLine(self.trans)
self.trans.write(JSON_OBJECT_END)
def readJSONSyntaxChar(self, char):
ch = self.reader.read()
if ch != char:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unexpected character: %s" % ch)
def readJSONString(self, skipContext=False):
self.skipWhitespace()
if skipContext is False:
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
string = []
while True:
ch = self.reader.read()
if ch == JSON_STRING_DELIMITER:
break
if ch == JSON_BACKSLASH:
ch = self.reader.read()
if ch == b'u':
self.readJSONSyntaxChar(JSON_ZERO_CHAR)
self.readJSONSyntaxChar(JSON_ZERO_CHAR)
data = self.trans.read(2)
if sys.version_info[0] >= 3 and isinstance(data, bytes):
ch = json.JSONDecoder().decode(
'"\\u00%s"' % str(data, 'utf-8'))
else:
ch = json.JSONDecoder().decode('"\\u00%s"' % data)
else:
idx = ESCAPE_CHARS.find(ch)
if idx == -1:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Expected control char")
ch = ESCAPE_CHAR_VALS[idx]
string.append(ch)
return b''.join(string)
def isJSONNumeric(self, ch):
return NUMERIC_CHAR.find(ch) >= 0
def readJSONNumericChars(self):
numeric = []
while True:
ch = self.reader.peek()
if self.isJSONNumeric(ch) is False:
break
numeric.append(self.reader.read())
return b''.join(numeric)
def readJSONInteger(self):
self.context.read(self.reader)
self.skipWhitespace()
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
numeric = self.readJSONNumericChars()
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
try:
return int(numeric)
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
def readJSONDouble(self):
self.context.read(self.reader)
self.skipWhitespace()
if self.reader.peek() == JSON_STRING_DELIMITER:
string = self.readJSONString(True)
try:
                double = float(string)
                # Quoted numeric data is only legal when the context escapes
                # numbers or the value is one of the special tokens
                # NaN / Infinity / -Infinity (NaN never compares equal to itself).
                is_special = (double != double or
                              double == float('inf') or
                              double == float('-inf'))
                if not self.context.escapeNum() and not is_special:
                    raise TProtocolException(TProtocolException.INVALID_DATA,
                                             "Numeric data unexpectedly quoted")
                return double
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in numeric data")
else:
try:
return float(self.readJSONNumericChars())
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in numeric data")
def readJSONBase64(self):
string = self.readJSONString()
return b64decode(string)
def readJSONBool(self):
self.context.read(self.reader)
self.skipWhitespace()
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
if self.reader.peek() == b't':
true_string = b'true'
for i in range(4):
if self.reader.read() != true_string[i:i+1]:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in bool")
boolVal = True
elif self.reader.peek() == b'f':
false_string = b'false'
for i in range(5):
if self.reader.read() != false_string[i:i+1]:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in bool")
boolVal = False
else:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encountered in bool")
if self.context.escapeNum():
self.readJSONSyntaxChar(JSON_STRING_DELIMITER)
return boolVal
def readJSONArrayStart(self):
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_ARRAY_START)
self.pushContext(TJSONListContext(protocol=self,
indentLevel=len(self.contexts)))
def readJSONArrayEnd(self):
self.popContext()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_ARRAY_END)
def readJSONMapStart(self):
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_START)
self.pushContext(TJSONListContext(protocol=self,
indentLevel=len(self.contexts)))
def readJSONMapEnd(self):
self.popContext()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_END)
def readJSONObjectStart(self):
self.context.read(self.reader)
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_START)
self.pushContext(TJSONPairContext(protocol=self,
indentLevel=len(self.contexts)))
def readJSONObjectEnd(self):
self.popContext()
self.skipWhitespace()
self.readJSONSyntaxChar(JSON_OBJECT_END)
class TSimpleJSONProtocol(TSimpleJSONProtocolBase):
"""
JSON protocol implementation for Thrift. This protocol is write-only, and
produces a simple output format that conforms to the JSON standard.
"""
def writeMessageBegin(self, name, messageType, seqId):
self.writeJSONArrayStart()
self.context.writeNewLine(self.trans)
self.writeJSONInteger(THRIFT_VERSION_1)
self.writeJSONString(name)
self.writeJSONInteger(messageType)
self.writeJSONInteger(seqId)
def writeMessageEnd(self):
self.writeJSONArrayEnd()
def writeStructBegin(self, name):
self.writeJSONObjectStart()
def writeStructEnd(self):
self.writeJSONObjectEnd()
def writeFieldBegin(self, name, fieldType, fieldId):
self.context.write(self.trans)
self.popContext()
self.pushContext(TJSONPairContext(protocol=self,
indentLevel=len(self.contexts)))
self.context.writeNewLine(self.trans)
self.writeJSONString(name)
def writeFieldEnd(self):
return
def writeFieldStop(self):
return
def writeMapBegin(self, keyType, valType, size):
self.writeJSONMapStart()
self.context.writeNewLine(self.trans)
self.pushContext(TJSONPairContext(protocol=self,
indentLevel=len(self.contexts) - 1, isMapPair=True))
def writeMapEnd(self):
self.popContext()
self.writeJSONMapEnd()
def writeListBegin(self, elemType, size):
self.writeJSONArrayStart()
self.context.writeNewLine(self.trans)
def writeListEnd(self):
self.writeJSONArrayEnd()
def writeSetBegin(self, elemType, size):
self.writeJSONArrayStart()
self.context.writeNewLine(self.trans)
def writeSetEnd(self):
self.writeJSONArrayEnd()
def writeBool(self, val):
self.writeJSONBool(val)
def writeByte(self, byte):
self.writeJSONInteger(byte)
def writeI16(self, i16):
self.writeJSONInteger(i16)
def writeI32(self, i32):
self.writeJSONInteger(i32)
def writeI64(self, i64):
self.writeJSONInteger(i64)
def writeDouble(self, d):
self.writeJSONDouble(d)
def writeFloat(self, f):
self.writeJSONDouble(f)
def writeString(self, outStr):
self.writeJSONString(outStr)
def writeBinary(self, outStr):
self.writeJSONBase64(outStr)
def readMessageBegin(self):
self.readJSONArrayStart()
self.skipWhitespace()
if self.readJSONInteger() != THRIFT_VERSION_1:
raise TProtocolException(TProtocolException.BAD_VERSION,
"Message contained bad version.")
name = self.readJSONString()
mtype = self.readJSONInteger()
seqid = self.readJSONInteger()
return (name, mtype, seqid)
def readMessageEnd(self):
self.readJSONArrayEnd()
def readStructBegin(self):
self.readJSONObjectStart()
        # Only push a nested spec if one has been prepared (by readFieldBegin or
        # a container's getNextSpec); on the very first readStructBegin the
        # top-level StructSpec is already current and nextSpec is None.
if self.spec.nextSpec is not None:
self.pushSpec(self.spec.getNextSpec())
def readStructEnd(self):
self.readJSONObjectEnd()
self.popSpec()
def readFieldBegin(self):
self.skipWhitespace()
ch = self.reader.peek()
if ch == JSON_OBJECT_END:
return (None, TType.STOP, 0)
self.context.read(self.reader)
self.popContext()
self.pushContext(TJSONPairContext(protocol=self,
indentLevel=len(self.contexts)))
self.skipWhitespace()
fname = self.readJSONString()
assert isinstance(self.spec, StructSpec)
return self.spec.readFieldBegin(
fname,
self.guessTypeIdFromFirstByte)
def readFieldEnd(self):
return
def readFieldStop(self):
return
def readNumber(self):
return self.readJSONInteger()
readByte = readNumber
readI16 = readNumber
readI32 = readNumber
readI64 = readNumber
def readDouble(self):
return self.readJSONDouble()
def readFloat(self):
return self.readJSONDouble()
def readString(self):
return self.readJSONString()
def readBinary(self):
return self.readJSONBase64()
def readBool(self):
return self.readJSONBool()
def readMapBegin(self):
self.readJSONMapStart()
self.skipWhitespace()
self.pushContext(TJSONPairContext(protocol=self,
indentLevel=len(self.contexts) - 1, isMapPair=True))
self.pushSpec(self.spec.getNextSpec())
return self.spec.readMapBegin()
def readMapEnd(self):
self.popContext()
self.readJSONMapEnd()
self.popSpec()
def peekMap(self):
self.skipWhitespace()
return self.reader.peek() != JSON_OBJECT_END
def peekList(self):
self.skipWhitespace()
return self.reader.peek() != JSON_ARRAY_END
peekSet = peekList
def readListBegin(self):
self.skipWhitespace()
self.readJSONArrayStart()
self.pushSpec(self.spec.getNextSpec())
return self.spec.readListBegin()
readSetBegin = readListBegin
def readListEnd(self):
self.skipWhitespace()
self.readJSONArrayEnd()
self.popSpec()
readSetEnd = readListEnd
class TSimpleJSONProtocolFactory:
def getProtocol(self, trans, spec=None):
prot = TSimpleJSONProtocol(trans, spec)
return prot
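# A minimal, hedged usage sketch (not part of the original module). It assumes a
# Thrift-generated struct class `Person` exposing `thrift_spec`, `write()` and
# `read()`, plus the standard TTransport.TMemoryBuffer; names are illustrative.
#
#     from thrift.transport import TTransport
#
#     person = Person(name='Ada')
#     trans = TTransport.TMemoryBuffer()
#     proto = TSimpleJSONProtocolFactory().getProtocol(trans, spec=Person.thrift_spec)
#     person.write(proto)            # pretty-printed JSON lands in trans
#     payload = trans.getvalue()
#
#     # Reading back requires the spec so field types can be recovered:
#     proto_in = TSimpleJSONProtocolFactory().getProtocol(
#         TTransport.TMemoryBuffer(payload), spec=Person.thrift_spec)
#     decoded = Person()
#     decoded.read(proto_in)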
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience wrapper for invoking APIs/factories w/ a project."""
import os
from gcloud._helpers import _LocalStack
from gcloud._helpers import _determine_default_project as _base_default_project
from gcloud.client import _ClientProjectMixin
from gcloud.client import Client as _BaseClient
from gcloud.datastore import helpers
from gcloud.datastore.connection import Connection
from gcloud.datastore.batch import Batch
from gcloud.datastore.entity import Entity
from gcloud.datastore.key import Key
from gcloud.datastore.query import Query
from gcloud.datastore.transaction import Transaction
from gcloud.environment_vars import GCD_DATASET
_MAX_LOOPS = 128
"""Maximum number of iterations to wait for deferred keys."""
def _get_gcd_project():
"""Gets the GCD application ID if it can be inferred."""
return os.getenv(GCD_DATASET)
def _determine_default_project(project=None):
"""Determine default project explicitly or implicitly as fall-back.
    In the implicit case, four environments are supported. In order of
    precedence, the implicit environments are:
* DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing)
* GCLOUD_PROJECT environment variable
* Google App Engine application ID
* Google Compute Engine project ID (from metadata server)
:type project: string
:param project: Optional. The project to use as default.
:rtype: string or ``NoneType``
:returns: Default project if it can be determined.
"""
if project is None:
project = _get_gcd_project()
if project is None:
project = _base_default_project(project=project)
return project
def _extended_lookup(connection, project, key_pbs,
missing=None, deferred=None,
eventual=False, transaction_id=None):
"""Repeat lookup until all keys found (unless stop requested).
Helper function for :meth:`Client.get_multi`.
:type connection: :class:`gcloud.datastore.connection.Connection`
:param connection: The connection used to connect to datastore.
:type project: string
:param project: The project to make the request for.
:type key_pbs: list of :class:`gcloud.datastore._generated.entity_pb2.Key`
:param key_pbs: The keys to retrieve from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entity
protobufs returned by the backend as "missing" will be
copied into it.
:type deferred: list
:param deferred: (Optional) If a list is passed, the key protobufs returned
by the backend as "deferred" will be copied into it.
:type eventual: bool
:param eventual: If False (the default), request ``STRONG`` read
consistency. If True, request ``EVENTUAL`` read
consistency.
:type transaction_id: string
:param transaction_id: If passed, make the request in the scope of
the given transaction. Incompatible with
``eventual==True``.
:rtype: list of :class:`gcloud.datastore._generated.entity_pb2.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if missing / deferred are not null or
empty list.
"""
if missing is not None and missing != []:
raise ValueError('missing must be None or an empty list')
if deferred is not None and deferred != []:
raise ValueError('deferred must be None or an empty list')
results = []
loop_num = 0
while loop_num < _MAX_LOOPS: # loop against possible deferred.
loop_num += 1
results_found, missing_found, deferred_found = connection.lookup(
project=project,
key_pbs=key_pbs,
eventual=eventual,
transaction_id=transaction_id,
)
results.extend(results_found)
if missing is not None:
missing.extend(missing_found)
if deferred is not None:
deferred.extend(deferred_found)
break
if len(deferred_found) == 0:
break
# We have deferred keys, and the user didn't ask to know about
# them, so retry (but only with the deferred ones).
key_pbs = deferred_found
return results
class Client(_BaseClient, _ClientProjectMixin):
"""Convenience wrapper for invoking APIs/factories w/ a project.
:type project: string
:param project: (optional) The project to pass to proxied API methods.
:type namespace: string
:param namespace: (optional) namespace to pass to proxied API methods.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def __init__(self, project=None, namespace=None,
credentials=None, http=None):
_ClientProjectMixin.__init__(self, project=project)
self.namespace = namespace
self._batch_stack = _LocalStack()
super(Client, self).__init__(credentials, http)
@staticmethod
def _determine_default(project):
return _determine_default_project(project)
def _push_batch(self, batch):
"""Push a batch/transaction onto our stack.
"Protected", intended for use by batch / transaction context mgrs.
:type batch: :class:`gcloud.datastore.batch.Batch`, or an object
implementing its API.
:param batch: newly-active batch/transaction.
"""
self._batch_stack.push(batch)
def _pop_batch(self):
"""Pop a batch/transaction from our stack.
"Protected", intended for use by batch / transaction context mgrs.
:raises: IndexError if the stack is empty.
:rtype: :class:`gcloud.datastore.batch.Batch`, or an object
implementing its API.
:returns: the top-most batch/transaction, after removing it.
"""
return self._batch_stack.pop()
@property
def current_batch(self):
"""Currently-active batch.
:rtype: :class:`gcloud.datastore.batch.Batch`, or an object
implementing its API, or ``NoneType`` (if no batch is active).
:returns: The batch/transaction at the top of the batch stack.
"""
return self._batch_stack.top
@property
def current_transaction(self):
"""Currently-active transaction.
:rtype: :class:`gcloud.datastore.transaction.Transaction`, or an object
implementing its API, or ``NoneType`` (if no transaction is
active).
:returns: The transaction at the top of the batch stack.
"""
transaction = self.current_batch
if isinstance(transaction, Transaction):
return transaction
def get(self, key, missing=None, deferred=None):
"""Retrieve an entity from a single key (if it exists).
.. note::
This is just a thin wrapper over :meth:`get_multi`.
The backend API does not make a distinction between a single key or
multiple keys in a lookup request.
:type key: :class:`gcloud.datastore.key.Key`
:param key: The key to be retrieved from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
returned by the backend as "missing" will be copied
into it.
:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned
by the backend as "deferred" will be copied into it.
:rtype: :class:`gcloud.datastore.entity.Entity` or ``NoneType``
:returns: The requested entity if it exists.
"""
entities = self.get_multi(keys=[key], missing=missing,
deferred=deferred)
if entities:
return entities[0]
def get_multi(self, keys, missing=None, deferred=None):
"""Retrieve entities, along with their attributes.
:type keys: list of :class:`gcloud.datastore.key.Key`
:param keys: The keys to be retrieved from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
returned by the backend as "missing" will be copied
into it. If the list is not empty, an error will occur.
:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned
by the backend as "deferred" will be copied into it.
If the list is not empty, an error will occur.
:rtype: list of :class:`gcloud.datastore.entity.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if one or more of ``keys`` has a project
which does not match our project.
"""
if not keys:
return []
ids = set(key.project for key in keys)
for current_id in ids:
if current_id != self.project:
raise ValueError('Keys do not match project')
transaction = self.current_transaction
entity_pbs = _extended_lookup(
connection=self.connection,
project=self.project,
key_pbs=[k.to_protobuf() for k in keys],
missing=missing,
deferred=deferred,
transaction_id=transaction and transaction.id,
)
if missing is not None:
missing[:] = [
helpers.entity_from_protobuf(missed_pb)
for missed_pb in missing]
if deferred is not None:
deferred[:] = [
helpers.key_from_protobuf(deferred_pb)
for deferred_pb in deferred]
return [helpers.entity_from_protobuf(entity_pb)
for entity_pb in entity_pbs]
def put(self, entity):
"""Save an entity in the Cloud Datastore.
.. note::
This is just a thin wrapper over :meth:`put_multi`.
The backend API does not make a distinction between a single
entity or multiple entities in a commit request.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: The entity to be saved to the datastore.
"""
self.put_multi(entities=[entity])
def put_multi(self, entities):
"""Save entities in the Cloud Datastore.
:type entities: list of :class:`gcloud.datastore.entity.Entity`
:param entities: The entities to be saved to the datastore.
:raises: :class:`ValueError` if ``entities`` is a single entity.
"""
if isinstance(entities, Entity):
raise ValueError("Pass a sequence of entities")
if not entities:
return
current = self.current_batch
in_batch = current is not None
if not in_batch:
current = self.batch()
for entity in entities:
current.put(entity)
if not in_batch:
current.commit()
def delete(self, key):
"""Delete the key in the Cloud Datastore.
.. note::
This is just a thin wrapper over :meth:`delete_multi`.
The backend API does not make a distinction between a single key or
multiple keys in a commit request.
:type key: :class:`gcloud.datastore.key.Key`
:param key: The key to be deleted from the datastore.
"""
return self.delete_multi(keys=[key])
def delete_multi(self, keys):
"""Delete keys from the Cloud Datastore.
:type keys: list of :class:`gcloud.datastore.key.Key`
:param keys: The keys to be deleted from the datastore.
"""
if not keys:
return
        # We allow partial keys to attempt a delete; the backend will fail the
        # request.
current = self.current_batch
in_batch = current is not None
if not in_batch:
current = self.batch()
for key in keys:
current.delete(key)
if not in_batch:
current.commit()
def allocate_ids(self, incomplete_key, num_ids):
"""Allocate a list of IDs from a partial key.
:type incomplete_key: :class:`gcloud.datastore.key.Key`
:param incomplete_key: Partial key to use as base for allocated IDs.
:type num_ids: int
:param num_ids: The number of IDs to allocate.
:rtype: list of :class:`gcloud.datastore.key.Key`
:returns: The (complete) keys allocated with ``incomplete_key`` as
root.
:raises: :class:`ValueError` if ``incomplete_key`` is not a
partial key.
"""
if not incomplete_key.is_partial:
raise ValueError(('Key is not partial.', incomplete_key))
incomplete_key_pb = incomplete_key.to_protobuf()
incomplete_key_pbs = [incomplete_key_pb] * num_ids
conn = self.connection
allocated_key_pbs = conn.allocate_ids(incomplete_key.project,
incomplete_key_pbs)
allocated_ids = [allocated_key_pb.path[-1].id
for allocated_key_pb in allocated_key_pbs]
return [incomplete_key.completed_key(allocated_id)
for allocated_id in allocated_ids]
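    # Example (illustrative only): client.allocate_ids(client.key('Person'), 3)
    # reserves three IDs for the partial 'Person' key and returns the completed
    # keys.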
def key(self, *path_args, **kwargs):
"""Proxy to :class:`gcloud.datastore.key.Key`.
Passes our ``project``.
"""
if 'project' in kwargs:
raise TypeError('Cannot pass project')
kwargs['project'] = self.project
if 'namespace' not in kwargs:
kwargs['namespace'] = self.namespace
return Key(*path_args, **kwargs)
def batch(self):
"""Proxy to :class:`gcloud.datastore.batch.Batch`."""
return Batch(self)
def transaction(self):
"""Proxy to :class:`gcloud.datastore.transaction.Transaction`."""
return Transaction(self)
def query(self, **kwargs):
"""Proxy to :class:`gcloud.datastore.query.Query`.
Passes our ``project``.
"""
if 'client' in kwargs:
raise TypeError('Cannot pass client')
if 'project' in kwargs:
raise TypeError('Cannot pass project')
kwargs['project'] = self.project
if 'namespace' not in kwargs:
kwargs['namespace'] = self.namespace
return Query(self, **kwargs)
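# A minimal, hedged usage sketch (not part of the original module); the project,
# kind and property names below are illustrative only.
#
#     client = Client(project='my-project')
#     key = client.key('Person', 1234)
#     entity = Entity(key=key)
#     entity['name'] = 'Ada'
#     client.put(entity)              # single put; use client.batch() to group many
#     fetched = client.get(key)
#     with client.transaction():      # mutations inside commit atomically
#         client.delete(key)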
|
|
from flask import Flask
import webmaster.decorators as decorators
import webmaster.core as core
#===============================================================================
# EXTENDS
def test_extends():
def plugin(view, **kwargs):
class SubView(object):
def inner(self):
pass
return SubView
@decorators.plugin(plugin)
class View(core.View):
pass
i = View()
assert hasattr(i, "inner") is True
#===============================================================================
# ROUTE
def test_route():
def deco1():
pass
def deco2():
pass
@decorators.route("/a-section", decorators=[deco1, deco2])
class A(core.View):
@decorators.route("home")
def index(self):
pass
a = A()
a.index()
assert "/a-section" == a.base_route
assert 2 == len(a.decorators)
assert deco1 in a.decorators
assert "home" == A.index._rule_cache["index"][0][0]
def test_methods():
class A(core.View):
@decorators.methods("post")
def path(self):
pass
@decorators.methods("post", "get", "put")
def other(self):
pass
a = A()
a.path()
a.other()
assert "POST" in A.path._methods_cache
assert "PUT" in A.other._methods_cache
assert 3 == len(A.other._methods_cache)
#===============================================================================
# MENU
def test_menu():
menu = decorators.menu
menu.clear()
@menu("Hello World")
class Hello(object):
@menu("Index")
def index(self):
pass
@menu("Page 2")
def index2(self):
pass
@menu("Monster")
class Monster(object):
@menu("Home")
def maggi(self):
pass
h = menu.get(Hello)
assert len(menu.MENU) == 2
def test_menu_with_extends():
menu = decorators.menu
menu.clear()
@menu("Hello World", group_name="admin")
class Hello(object):
@menu("Index")
def index(self):
pass
@menu("Page 2")
def index2(self):
pass
@menu("Monster", extends=Hello)
class Monster(object):
@menu("Home")
def maggi(self):
pass
h = menu.get(Hello)
assert h["kwargs"]["group_name"] == "admin"
assert len(h["sub_menu"]) == 2
def test_menu_with_advanced_extends():
menu = decorators.menu
menu.clear()
@menu("Hello World", group_name="admin")
class Hello(object):
@menu("Index")
def index(self):
pass
@menu("Page 2")
def index2(self):
pass
@menu("Monster", extends=Hello)
class Monster(object):
@menu("Home", extends=Hello)
def maggi(self):
pass
h = menu.get(Hello)
assert len(h["sub_menu"]) == 3
def test_menu_render():
menu = decorators.menu
menu.clear()
app = Flask(__name__)
app.testing = True
@menu("Hello World", group_name="admin")
class Hello(object):
@menu("Index")
def index(self):
pass
@menu("Page 2")
def index2(self):
pass
@menu("Monster")
class Monster(object):
@menu("Home")
def maggi(self):
pass
with app.test_client() as c:
c.get("/")
assert len(menu.render()) == 2
#===============================================================================
# LAYOUT
def test_template():
@decorators.template("layout.html", name="Jone")
class A(core.View):
@decorators.template("index.html", version=1)
def index(self):
return {}
@decorators.template("index2.html", layout="NewLayout.html")
def index2(self):
return {}
@decorators.template("index2.html", layout="NewLayout.html")
def index3(self):
return {
"template_": "other.html",
"layout_": "other-layout.html"
}
a = A()
ai1 = a.index()
ai2 = a.index2()
ai3 = a.index3()
assert "layout.html" == a._template_extends__.get("layout")
assert "layout.html" == a.base_layout
assert "index.html" == ai1["template_"]
assert "index2.html" == ai2["template_"]
assert "NewLayout.html" == ai2["layout_"]
assert "other.html" == ai3["template_"]
assert "other-layout.html" == ai3["layout_"]
assert 1 == ai1["version"]
def test_template_with_extension():
@decorators.template(brand_name="My Admin Zone")
class A(core.View):
pass
@decorators.template("Webmaster/admin/layout.html", extends=A)
class B(core.View):
pass
@decorators.template(extends=B, brand_name="Other")
class C(core.View):
pass
a = A()
b = B()
c = C()
assert "layout.html" in a._template_extends__.get("layout")
assert "Webmaster/admin/layout.html" in b._template_extends__.get("layout")
assert "My Admin Zone" in b._template_extends__.get("brand_name")
assert "Other" in c._template_extends__.get("brand_name")
#===============================================================================
# RENDER AS
def test_render_as_json():
import json
app = Flask(__name__)
app.testing = True
@app.route("/")
@decorators.render_as_json
def index():
return {"test": "ok"}
with app.test_client() as c:
assert {"test": "ok"} == json.loads(c.get("/").data)
def test_render_as_xml():
app = Flask(__name__)
app.testing = True
@app.route("/")
@decorators.render_as_xml
def index():
return {"test": "ok"}
with app.test_client() as c:
data = c.get("/").data
assert '<?xml version="1.0"' in data
|
|
import unittest
import pyCGM_Single.pyCGM as pyCGM
import numpy as np
import pytest
rounding_precision = 6
class TestPycgmAngle():
"""
This class tests the functions used for getting angles in pyCGM.py:
getangle_sho
getangle_spi
getangle
getHeadangle
getPelangle
"""
@pytest.mark.parametrize(["xRot", "yRot", "zRot", "expected"], [
(0, 0, 0, [0, 0, 0]),
# X rotations
(90, 0, 0, [0, 90, 0]), (30, 0, 0, [0, 30, 0]), (-30, 0, 0, [0, -30, 0]), (120, 0, 0, [0, 120, 0]),
(-120, 0, 0, [0, -120, 0]), (180, 0, 0, [0, 180, 0]),
# Y rotations
(0, 90, 0, [90, 0, 0]), (0, 30, 0, [30, 0, 0]), (0, -30, 0, [-30, 0, 0]), (0, 120, 0, [60, -180, -180]),
(0, -120, 0, [-60, -180, -180]), (0, 180, 0, [0, -180, -180]),
# Z rotations
(0, 0, 90, [0, 0, 90]), (0, 0, 30, [0, 0, 30]), (0, 0, -30, [0, 0, -30]), (0, 0, 120, [0, 0, 120]),
(0, 0, -120, [0, 0, -120]), (0, 0, 180, [0, 0, 180]),
# Multiple Rotations
(150, 30, 0, [30, 150, 0]), (45, 0, 60, [0, 45, 60]), (0, 90, 120, [90, 0, 120]), (135, 45, 90, [45, 135, 90])
])
def test_getangle_sho(self, xRot, yRot, zRot, expected):
"""
This test provides coverage of the getangle_sho function in pyCGM.py,
defined as getangle_sho(axisP,axisD) where axisP is the proximal axis and axisD is the distal axis.
getangle_sho takes in as input two axes, axisP and axisD, and returns in degrees, the Euler angle
rotations required to rotate axisP to axisD as a list [alpha, beta, gamma]. getangle_sho uses the XYZ
order Euler rotations to calculate the angles. The rotation matrix is obtained by directly comparing
the vectors in axisP to those in axisD through dot products between different components
of each axis. axisP and axisD each have 3 components to their axis, x, y, and z.
The angles are calculated as follows:
.. math::
\[ \alpha = \arcsin{(axisD_{z} \cdot axisP_{x})} \]
\[ \beta = \arctan2{(-(axisD_{z} \cdot axisP_{y}), axisD_{z} \cdot axisP_{z})} \]
\[ \gamma = \arctan2{(-(axisD_{y} \cdot axisP_{x}), axisD_{x} \cdot axisP_{x})} \]
This test calls pyCGM.rotmat() to create axisP with an x, y, and z rotation defined in the parameters.
It then calls pyCGM.getangle_sho() with axisP and axisD, which was created with no rotation in the
x, y or z direction. This result is then compared to the expected result. The results from this test will
        be in the YXZ order, meaning that a parameter with an inputted x rotation will have a result with the same
angle in the y direction. The only exception to this is a 120, -120, or 180 degree Y rotation. These will end
up with a 60, -60, and 0 degree angle in the X direction respectively, and with a -180 degree
angle in the y and z direction.
"""
        # Create axisP as a rotational matrix using the x, y, and z rotations given in the test case
axisP = pyCGM.rotmat(xRot, yRot, zRot)
axisD = pyCGM.rotmat(0, 0, 0)
result = pyCGM.getangle_sho(axisP, axisD)
np.testing.assert_almost_equal(result, expected, rounding_precision)
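    # Illustrative spot check (not part of the original suite) of the docstring
    # formulas above: with axisD = rotmat(0, 0, 0) (identity) and
    # axisP = rotmat(0, 0, 90), axisD_z . axisP_x = 0 so alpha = arcsin(0) = 0,
    # while gamma = arctan2(-(axisD_y . axisP_x), axisD_x . axisP_x)
    # = arctan2(1, 0) = 90 degrees, matching the (0, 0, 90) -> [0, 0, 90] row of
    # the parametrize table above.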
def test_getangle_sho_datatypes(self):
"""
This test provides coverage of the getangle_sho function in pyCGM.py, defined as getangle_sho(axisP,axisD).
It checks that the resulting output from calling getangle_sho is correct for a list of ints, a numpy array of
ints, a list of floats, and a numpy array of floats.
"""
axisD = pyCGM.rotmat(0, 0, 0)
axisP_floats = pyCGM.rotmat(90, 0, 90)
axisP_ints = [[int(y) for y in x] for x in axisP_floats]
expected = [0, 90, 90]
# Check that calling getangle_sho on a list of ints yields the expected results
result_int_list = pyCGM.getangle_sho(axisP_ints, axisD)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling getangle_sho on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.getangle_sho(np.array(axisP_ints, dtype='int'), np.array(axisD, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling getangle_sho on a list of floats yields the expected results
result_float_list = pyCGM.getangle_sho(axisP_floats, axisD)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling getangle_sho on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.getangle_sho(np.array(axisP_floats, dtype='float'), np.array(axisD, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
@pytest.mark.parametrize(["xRot", "yRot", "zRot", "expected"], [
(0, 0, 0, [0, 0, 0]),
# X rotations
(90, 0, 0, [0, 0, 90]), (30, 0, 0, [0, 0, 30]), (-30, 0, 0, [0, 0, -30]), (120, 0, 0, [0, 0, 60]), (-120, 0, 0, [0, 0, -60]), (180, 0, 0, [0, 0, 0]),
# Y rotations
(0, 90, 0, [90, 0, 0]), (0, 30, 0, [30, 0, 0]), (0, -30, 0, [-30, 0, 0]), (0, 120, 0, [60, 0, 0]), (0, -120, 0, [-60, 0, 0]), (0, 180, 0, [0, 0, 0]),
# Z rotations
(0, 0, 90, [0, 90, 0]), (0, 0, 30, [0, 30, 0]), (0, 0, -30, [0, -30, 0]), (0, 0, 120, [0, 60, 0]), (0, 0, -120, [0, -60, 0]), (0, 0, 180, [0, 0, 0]),
# Multiple Rotations
(150, 30, 0, [-30, 0, 30]), (45, 0, 60, [-40.89339465, 67.7923457, 20.70481105]), (0, 90, 120, [-90, 0, 60]), (135, 45, 90, [-54.73561032, 54.73561032, -30])
])
def test_getangle_spi(self, xRot, yRot, zRot, expected):
"""
This test provides coverage of the getangle_spi function in pyCGM.py,
defined as getangle_spi(axisP,axisD) where axisP is the proximal axis and axisD is the distal axis
getangle_spi takes in as input two axes, axisP and axisD, and returns in degrees, the Euler angle
rotations required to rotate axisP to axisD as a list [beta, gamma, alpha]. getangle_spi uses the XZX
order of Euler rotations to calculate the angles. The rotation matrix is obtained by directly comparing
the vectors in axisP to those in axisD through dot products between different components
of each axis. axisP and axisD each have 3 components to their axis, x, y, and z.
The angles are calculated as follows:
.. math::
            \[ \alpha = \arcsin{(axisD_{y} \cdot axisP_{z})} \]
            \[ \gamma = \arcsin{(-(axisD_{y} \cdot axisP_{x}) / \cos{\alpha})} \]
            \[ \beta = \arcsin{(-(axisD_{x} \cdot axisP_{z}) / \cos{\alpha})} \]
This test calls pyCGM.rotmat() to create axisP with an x, y, and z rotation defined in the parameters.
It then calls pyCGM.getangle_spi() with axisP and axisD, which was created with no rotation in the
x, y or z direction. This result is then compared to the expected result. The results from this test will
        be in the YZX order, meaning that a parameter with an inputted x rotation will have a result with the same
        angle in the z direction. The only exception is a 120, -120, or 180 degree Y rotation, which ends up with a
        60, -60, or 0 degree angle respectively.
"""
        # Create axisP as a rotational matrix using the x, y, and z rotations given in the test case
axisP = pyCGM.rotmat(xRot, yRot, zRot)
axisD = pyCGM.rotmat(0, 0, 0)
result = pyCGM.getangle_spi(axisP, axisD)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_getangle_spi_datatypes(self):
"""
This test provides coverage of the getangle_spi function in pyCGM.py, defined as getangle_spi(axisP,axisD).
It checks that the resulting output from calling getangle_spi is correct for a list of ints, a numpy array of
ints, a list of floats, and a numpy array of floats.
"""
axisD = pyCGM.rotmat(0, 0, 0)
axisP_floats = pyCGM.rotmat(90, 0, 90)
axisP_ints = [[int(y) for y in x] for x in axisP_floats]
expected = [-90, 90, 0]
# Check that calling getangle_spi on a list of ints yields the expected results
result_int_list = pyCGM.getangle_spi(axisP_ints, axisD)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling getangle_spi on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.getangle_spi(np.array(axisP_ints, dtype='int'), np.array(axisD, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling getangle_spi on a list of floats yields the expected results
result_float_list = pyCGM.getangle_spi(axisP_floats, axisD)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling getangle_spi on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.getangle_spi(np.array(axisP_floats, dtype='float'), np.array(axisD, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
@pytest.mark.parametrize(["xRot", "yRot", "zRot", "expected"], [
(0, 0, 0, [0, 0, 90]),
# X rotations
(90, 0, 0, [0, 90, 90]), (30, 0, 0, [0, 30, 90]), (-30, 0, 0, [0, -30, 90]), (120, 0, 0, [180, 60, -90]), (-120, 0, 0, [180, -60, -90]), (180, 0, 0, [180, 0, -90]),
# Y rotations
(0, 90, 0, [90, 0, 90]), (0, 30, 0, [30, 0, 90]), (0, -30, 0, [-30, 0, 90]), (0, 120, 0, [120, 0, 90]), (0, -120, 0, [-120, 0, 90]), (0, 180, 0, [180, 0, 90]),
# Z rotations
(0, 0, 90, [0, 0, 0]), (0, 0, 30, [0, 0, 60]), (0, 0, -30, [0, 0, 120]), (0, 0, 120, [0, 0, -30]), (0, 0, -120, [0, 0, -150]), (0, 0, 180, [0, 0, -90]),
# Multiple Rotations
(150, 30, 0, [146.30993247, 25.65890627, -73.89788625]), (45, 0, 60, [0, 45, 30]), (0, 90, 120, [90, 0, -30]), (135, 45, 90, [125.26438968, 30, -144.73561032])
])
def test_getangle(self, xRot, yRot, zRot, expected):
"""
This test provides coverage of the getangle function in pyCGM.py,
defined as getangle(axisP,axisD) where axisP is the proximal axis and axisD is the distal axis
getangle takes in as input two axes, axisP and axisD, and returns in degrees, the Euler angle
rotations required to rotate axisP to axisD as a list [beta, alpha, gamma]. getangle uses the YXZ
order of Euler rotations to calculate the angles. The rotation matrix is obtained by directly comparing
the vectors in axisP to those in axisD through dot products between different components
of each axis. axisP and axisD each have 3 components to their axis, x, y, and z. Since arcsin
is being used, the function checks whether the angle alpha is between -pi/2 and pi/2.
The angles are calculated as follows:
.. math::
\[ \alpha = \arcsin{(-axisD_{z} \cdot axisP_{y})} \]
If alpha is between -pi/2 and pi/2
.. math::
\[ \beta = \arctan2{((axisD_{z} \cdot axisP_{x}), axisD_{z} \cdot axisP_{z})} \]
\[ \gamma = \arctan2{((axisD_{y} \cdot axisP_{y}), axisD_{x} \cdot axisP_{y})} \]
Otherwise
.. math::
\[ \beta = \arctan2{(-(axisD_{z} \cdot axisP_{x}), axisD_{z} \cdot axisP_{z})} \]
\[ \gamma = \arctan2{(-(axisD_{y} \cdot axisP_{y}), axisD_{x} \cdot axisP_{y})} \]
This test calls pyCGM.rotmat() to create axisP with an x, y, and z rotation defined in the parameters.
It then calls pyCGM.getangle() with axisP and axisD, which was created with no rotation in the x, y or z
direction. This result is then compared to the expected result. The results from this test will be in the
YXZ order, meaning that a parameter with an input x rotation will have a result with the same angle in
the z direction. There is also an additional 90 degree angle in the z direction if there was no z rotation.
If there was a z rotation then there will be a different angle in the z direction. A z rotation of 90, 30, -30,
120, -120, 180 degrees results in a 0, 60, 120, -30, -150, -90 degree angle in the z direction respectively.
"""
# Create axisP as a rotational matrix using the x, y, and z rotations given in the test case
axisP = pyCGM.rotmat(xRot, yRot, zRot)
axisD = pyCGM.rotmat(0, 0, 0)
result = pyCGM.getangle(axisP, axisD)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_getangle_datatypes(self):
"""
This test provides coverage of the getangle function in pyCGM.py, defined as getangle(axisP,axisD).
It checks that the resulting output from calling getangle is correct for a list of ints, a numpy array of
ints, a list of floats, and a numpy array of floats.
"""
axisD = pyCGM.rotmat(0, 0, 0)
axisP_floats = pyCGM.rotmat(90, 0, 90)
axisP_ints = [[int(y) for y in x] for x in axisP_floats]
expected = [0, 90, 0]
# Check that calling getangle on a list of ints yields the expected results
result_int_list = pyCGM.getangle(axisP_ints, axisD)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling getangle on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.getangle(np.array(axisP_ints, dtype='int'), np.array(axisD, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling getangle on a list of floats yields the expected results
result_float_list = pyCGM.getangle(axisP_floats, axisD)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling getangle on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.getangle(np.array(axisP_floats, dtype='float'), np.array(axisD, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
@pytest.mark.parametrize(["xRot", "yRot", "zRot", "expected"], [
(0, 0, 0, [0, 0, -180]),
# X rotations
(90, 0, 0, [0, 90, -180]), (30, 0, 0, [0, 30, -180]), (-30, 0, 0, [0, -30, -180]), (120, 0, 0, [180, 60, 0]), (-120, 0, 0, [180, -60, 0]), (180, 0, 0, [180, 0, 0]),
# Y rotations
(0, 90, 0, [90, 0, -180]), (0, 30, 0, [30, 0, -180]), (0, -30, 0, [330, 0, -180]), (0, 120, 0, [120, 0, -180]), (0, -120, 0, [240, 0, -180]), (0, 180, 0, [180, 0, -180]),
# Z rotations
(0, 0, 90, [0, 0, -90]), (0, 0, 30, [0, 0, -150]), (0, 0, -30, [0, 0, -210]), (0, 0, 120, [0, 0, -60]), (0, 0, -120, [0, 0, -300]), (0, 0, 180, [0, 0, 0]),
# Multiple Rotations
(150, 30, 0, [146.30993247, 25.65890627, -16.10211375]), (45, 0, 60, [0, 45, -120]), (0, 90, 120, [90, 0, -60]), (135, 45, 90, [125.26438968, 30, 54.73561032])
])
def test_getHeadangle(self, xRot, yRot, zRot, expected):
"""
This test provides coverage of the getHeadangle function in pyCGM.py,
defined as getHeadangle(axisP,axisD) where axisP is the proximal axis and axisD is the distal axis
getHeadangle takes in as input two axes, axisP and axisD, and returns in degrees, the Euler angle
rotations required to rotate axisP to axisD as a list [alpha, beta, gamma]. getHeadangle uses the YXZ
order of Euler rotations to calculate the angles. The rotation matrix is obtained by directly comparing
the vectors in axisP to those in axisD through dot products between different components
of each axis. axisP and axisD each have 3 components to their axis, x, y, and z.
The angles are calculated as follows:
.. math::
\[ \beta = \arctan2{((axisD_{z} \cdot axisP_{y}), \sqrt{(axisD_{x} \cdot axisP_{y})^2 + (axisD_{y} \cdot axisP_{y})^2})} \]
\[ \alpha = \arctan2{(-(axisD_{z} \cdot axisP_{x}), axisD_{z} \cdot axisP_{z})} \]
\[ \gamma = \arctan2{(-(axisD_{x} \cdot axisP_{y}), axisD_{y} \cdot axisP_{y})} \]
This test calls pyCGM.rotmat() to create axisP with an x, y, and z rotation defined in the parameters.
It then calls pyCGM.getHeadangle() with axisP and axisD, which was created with no rotation in the x, y or z
direction. This result is then compared to the expected result. The results from this test will be in the
YXZ order, meaning that a parameter with an input x rotation will have a result with the same angle in
the z direction. There is also an additional -180 degree angle in the z direction if there was no z rotation.
If there was a z rotation then there will be a different angle in the z direction. A z rotation of 90, 30, -30,
120, -120, 180 degrees results in a -90, -150, -210, -60, -300, 0 degree angle in the z direction respectively.
"""
# Create axisP as a rotational matrix using the x, y, and z rotations given in the test case
axisP = pyCGM.rotmat(xRot, yRot, zRot)
axisD = pyCGM.rotmat(0, 0, 0)
result = pyCGM.getHeadangle(axisP, axisD)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_getHeadangle_datatypes(self):
"""
This test provides coverage of the getHeadangle function in pyCGM.py, defined as getHeadangle(axisP,axisD).
It checks that the resulting output from calling getHeadangle is correct for a list of ints, a numpy array of
ints, a list of floats, and a numpy array of floats.
"""
axisD = pyCGM.rotmat(0, 0, 0)
axisP_floats = pyCGM.rotmat(90, 90, 90)
axisP_ints = [[int(y) for y in x] for x in axisP_floats]
expected = [90, 0, 0]
# Check that calling getHeadangle on a list of ints yields the expected results
result_int_list = pyCGM.getHeadangle(axisP_ints, axisD)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling getHeadangle on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.getHeadangle(np.array(axisP_ints, dtype='int'), np.array(axisD, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling getHeadangle on a list of floats yields the expected results
result_float_list = pyCGM.getHeadangle(axisP_floats, axisD)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling getHeadangle on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.getHeadangle(np.array(axisP_floats, dtype='float'), np.array(axisD, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
@pytest.mark.parametrize(["xRot", "yRot", "zRot", "expected"], [
(0, 0, 0, [0, 0, 0]),
# X rotations
(90, 0, 0, [0, -90, 0]), (30, 0, 0, [0, -30, 0]), (-30, 0, 0, [0, 30, 0]), (120, 0, 0, [180, -60, 180]), (-120, 0, 0, [180, 60, 180]), (180, 0, 0, [180, 0, 180]),
# Y rotations
(0, 90, 0, [90, 0, 0]), (0, 30, 0, [30, 0, 0]), (0, -30, 0, [-30, 0, 0]), (0, 120, 0, [120, 0, 0]), (0, -120, 0, [-120, 0, -0]), (0, 180, 0, [180, 0, 0]),
# Z rotations
(0, 0, 90, [0, 0, 90]), (0, 0, 30, [0, 0, 30]), (0, 0, -30, [0, 0, -30]), (0, 0, 120, [0, 0, 120]), (0, 0, -120, [0, 0, -120]), (0, 0, 180, [0, 0, 180]),
# Multiple Rotations
(150, 30, 0, [146.30993247, -25.65890627, 163.89788625]), (45, 0, 60, [0, -45, 60]), (0, 90, 120, [90, 0, 120]), (135, 45, 90, [125.26438968, -30, -125.26438968])
])
def test_getPelangle(self, xRot, yRot, zRot, expected):
"""
This test provides coverage of the getPelangle function in pyCGM.py,
defined as getPelangle(axisP,axisD) where axisP is the proximal axis and axisD is the distal axis
getPelangle takes in as input two axes, axisP and axisD, and returns in degrees, the Euler angle
rotations required to rotate axisP to axisD as a list [alpha, beta, gamma]. getPelangle uses the YXZ
order of Euler rotations to calculate the angles. The rotation matrix is obtained by directly comparing
the vectors in axisP to those in axisD through dot products between different components
of each axis. axisP and axisD each have 3 components to their axis, x, y, and z.
The angles are calculated as follows:
.. math::
\[ \beta = \arctan2{((axisD_{z} \cdot axisP_{y}), \sqrt{(axisD_{z} \cdot axisP_{x})^2 + (axisD_{z} \cdot axisP_{z})^2})} \]
\[ \alpha = \arctan2{((axisD_{z} \cdot axisP_{x}), axisD_{z} \cdot axisP_{z})} \]
\[ \gamma = \arctan2{((axisD_{x} \cdot axisP_{y}), axisD_{y} \cdot axisP_{y})} \]
This test calls pyCGM.rotmat() to create axisP with an x, y, and z rotation defined in the parameters.
It then calls pyCGM.getPelangle() with axisP and axisD, which was created with no rotation in the x, y or z
direction. This result is then compared to the expected result. The results from this test will be in the
YXZ order, meaning that a parameter with an input x rotation will have a result with the same angle in
the z direction. The exception to this is x rotations. An x rotation of 90, 30, -30, 120, -120, 180
degrees results in a -90, -30, 30, -60, 60, 0 degree angle in the y direction respectively. An x rotation of
120, -120, or 180 degrees also results in a 180 degree rotation in the x and z angles.
"""
# Create axisP as a rotational matrix using the x, y, and z rotations given in the test case
axisP = pyCGM.rotmat(xRot, yRot, zRot)
axisD = pyCGM.rotmat(0, 0, 0)
result = pyCGM.getPelangle(axisP, axisD)
np.testing.assert_almost_equal(result, expected, rounding_precision)
def test_getPelangle_datatypes(self):
"""
This test provides coverage of the getPelangle function in pyCGM.py, defined as getPelangle(axisP,axisD).
It checks that the resulting output from calling getPelangle is correct for a list of ints, a numpy array of
ints, a list of floats, and a numpy array of floats.
"""
axisD = pyCGM.rotmat(0, 0, 0)
axisP_floats = pyCGM.rotmat(90, 90, 90)
axisP_ints = [[int(y) for y in x] for x in axisP_floats]
expected = [90, 0, 180]
# Check that calling getPelangle on a list of ints yields the expected results
result_int_list = pyCGM.getPelangle(axisP_ints, axisD)
np.testing.assert_almost_equal(result_int_list, expected, rounding_precision)
# Check that calling getPelangle on a numpy array of ints yields the expected results
result_int_nparray = pyCGM.getPelangle(np.array(axisP_ints, dtype='int'), np.array(axisD, dtype='int'))
np.testing.assert_almost_equal(result_int_nparray, expected, rounding_precision)
# Check that calling getPelangle on a list of floats yields the expected results
result_float_list = pyCGM.getPelangle(axisP_floats, axisD)
np.testing.assert_almost_equal(result_float_list, expected, rounding_precision)
# Check that calling getPelangle on a numpy array of floats yields the expected results
result_float_nparray = pyCGM.getPelangle(np.array(axisP_floats, dtype='float'), np.array(axisD, dtype='float'))
np.testing.assert_almost_equal(result_float_nparray, expected, rounding_precision)
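# --- Illustrative sketch (not part of pyCGM or its test suite) --------------
# The getangle docstring above describes a YXZ Euler-angle extraction built
# from dot products between the distal and proximal axes. The helper below
# restates those formulas directly in numpy, assuming axisP and axisD are 3x3
# arrays whose rows are each segment's x, y and z axis vectors (the same
# shape pyCGM.rotmat() produces in these tests).
def _example_yxz_euler_angles(axisP, axisD):
    axisP = np.asarray(axisP, dtype=float)
    axisD = np.asarray(axisD, dtype=float)
    # alpha from the arcsin term in the docstring
    alpha = np.arcsin(-np.dot(axisD[2], axisP[1]))
    if -np.pi / 2 < alpha < np.pi / 2:
        beta = np.arctan2(np.dot(axisD[2], axisP[0]), np.dot(axisD[2], axisP[2]))
        gamma = np.arctan2(np.dot(axisD[1], axisP[1]), np.dot(axisD[0], axisP[1]))
    else:
        beta = np.arctan2(-np.dot(axisD[2], axisP[0]), np.dot(axisD[2], axisP[2]))
        gamma = np.arctan2(-np.dot(axisD[1], axisP[1]), np.dot(axisD[0], axisP[1]))
    # returned in degrees in [beta, alpha, gamma] order, matching the tests:
    # identity axes give [0, 0, 90], as in the (0, 0, 0) test case above
    return [np.degrees(beta), np.degrees(alpha), np.degrees(gamma)]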
|
|
#!/usr/bin/env python
import os
import re
import sys
from datetime import datetime
import click
from send2trash import send2trash
# Verify that external dependencies are present first, so the user gets a
# more user-friendly error instead of an ImportError traceback.
from elodie.dependencies import verify_dependencies
if not verify_dependencies():
sys.exit(1)
from elodie import constants
from elodie import geolocation
from elodie.media.media import Media
from elodie.media.audio import Audio
from elodie.media.photo import Photo
from elodie.media.video import Video
from elodie.filesystem import FileSystem
from elodie.localstorage import Db
DB = Db()
FILESYSTEM = FileSystem()
def import_file(_file, destination, album_from_folder, trash):
"""Set file metadata and move it to destination.
"""
if not os.path.exists(_file):
if constants.debug:
print 'Could not find %s' % _file
print '{"source":"%s", "error_msg":"Could not find %s"}' % \
(_file, _file)
return
media = Media.get_class_by_file(_file, [Audio, Photo, Video])
if not media:
if constants.debug:
print 'Not a supported file (%s)' % _file
print '{"source":"%s", "error_msg":"Not a supported file"}' % _file
return
if media.__name__ == 'Video':
FILESYSTEM.set_date_from_path_video(media)
if album_from_folder:
media.set_album_from_folder()
dest_path = FILESYSTEM.process_file(_file, destination,
media, allowDuplicate=False, move=False)
if dest_path:
print '%s -> %s' % (_file, dest_path)
if trash:
send2trash(_file)
@click.command('import')
@click.option('--destination', type=click.Path(file_okay=False),
required=True, help='Copy imported files into this directory.')
@click.option('--source', type=click.Path(file_okay=False),
help='Import files from this directory, if specified.')
@click.option('--file', type=click.Path(dir_okay=False),
help='Import this file, if specified.')
@click.option('--album-from-folder', default=False, is_flag=True,
help="Use images' folders as their album names.")
@click.option('--trash', default=False, is_flag=True,
help='After copying files, move the old files to the trash.')
@click.argument('paths', nargs=-1, type=click.Path())
def _import(destination, source, file, album_from_folder, trash, paths):
"""Import files or directories.
"""
destination = os.path.expanduser(destination)
files = set()
paths = set(paths)
if source:
paths.add(source)
if file:
paths.add(file)
for path in paths:
path = os.path.expanduser(path)
if os.path.isdir(path):
files.update(FILESYSTEM.get_all_files(path, None))
else:
files.add(path)
for current_file in files:
import_file(current_file, destination, album_from_folder,
trash)
def update_location(media, file_path, location_name):
"""Update location exif metadata of media.
"""
location_coords = geolocation.coordinates_by_name(location_name)
if location_coords and 'latitude' in location_coords and \
'longitude' in location_coords:
location_status = media.set_location(location_coords[
'latitude'], location_coords['longitude'])
if not location_status:
if constants.debug:
print 'Failed to update location'
print ('{"source":"%s",' % file_path,
'"error_msg":"Failed to update location"}')
sys.exit(1)
return True
def update_time(media, file_path, time_string):
"""Update time exif metadata of media.
"""
time_format = '%Y-%m-%d %H:%M:%S'
if re.match(r'^\d{4}-\d{2}-\d{2}$', time_string):
time_string = '%s 00:00:00' % time_string
elif not re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$', time_string):
msg = ('Invalid time format. Use YYYY-mm-dd hh:ii:ss or YYYY-mm-dd')
if constants.debug:
print msg
print '{"source":"%s", "error_msg":"%s"}' % (file_path, msg)
sys.exit(1)
time = datetime.strptime(time_string, time_format)
media.set_date_taken(time)
return True
@click.command('update')
@click.option('--album', help='Update the image album.')
@click.option('--location', help=('Update the image location. Location '
'should be the name of a place, like "Las '
'Vegas, NV".'))
@click.option('--time', help=('Update the image time. Time should be in '
'YYYY-mm-dd hh:ii:ss or YYYY-mm-dd format.'))
@click.option('--title', help='Update the image title.')
@click.argument('files', nargs=-1, type=click.Path(dir_okay=False),
required=True)
def _update(album, location, time, title, files):
"""Update files.
"""
for file_path in files:
if not os.path.exists(file_path):
if constants.debug:
print 'Could not find %s' % file_path
print '{"source":"%s", "error_msg":"Could not find %s"}' % \
(file_path, file_path)
continue
file_path = os.path.expanduser(file_path)
destination = os.path.expanduser(os.path.dirname(os.path.dirname(
os.path.dirname(file_path))))
media = Media.get_class_by_file(file_path, [Audio, Photo, Video])
if not media:
continue
updated = False
if location:
update_location(media, file_path, location)
updated = True
if time:
update_time(media, file_path, time)
updated = True
if album:
media.set_album(album)
updated = True
# Updating a title can be problematic when doing it 2+ times on a file.
# You would end up with img_001.jpg -> img_001-first-title.jpg ->
# img_001-first-title-second-title.jpg.
# To resolve that we have to track the prior title (if there was one).
# Then we massage the updated_media's metadata['base_name'] to remove
# the old title.
# Since FileSystem.get_file_name() relies on base_name it will properly
# rename the file by updating the title instead of appending it.
remove_old_title_from_name = False
if title:
# We call get_metadata() to cache it before making any changes
metadata = media.get_metadata()
title_update_status = media.set_title(title)
original_title = metadata['title']
if title_update_status and original_title:
# @TODO: We should move this to a shared method since
# FileSystem.get_file_name() does it too.
original_title = re.sub(r'\W+', '-', original_title.lower())
original_base_name = metadata['base_name']
remove_old_title_from_name = True
updated = True
if updated:
updated_media = Media.get_class_by_file(file_path,
[Audio, Photo, Video])
# See comments above on why we have to do this when titles
# get updated.
if remove_old_title_from_name and len(original_title) > 0:
updated_media.get_metadata()
updated_media.set_metadata_basename(
original_base_name.replace('-%s' % original_title, ''))
dest_path = FILESYSTEM.process_file(file_path, destination,
updated_media, move=True, allowDuplicate=True)
if constants.debug:
print u'%s -> %s' % (file_path, dest_path)
print '{"source":"%s", "destination":"%s"}' % (file_path,
dest_path)
# If the folder we moved the file out of or its parent are empty
# we delete it.
FILESYSTEM.delete_directory_if_empty(os.path.dirname(file_path))
FILESYSTEM.delete_directory_if_empty(
os.path.dirname(os.path.dirname(file_path)))
@click.group()
def main():
pass
main.add_command(_import)
main.add_command(_update)
if __name__ == '__main__':
main()
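# --- Illustrative sketch (not part of elodie) --------------------------------
# The comments in _update() above describe stripping a previously applied
# title slug out of a file's cached base_name so a new title replaces the old
# one instead of being appended. A minimal standalone version of that
# massaging, using the same slugification as the update code, looks like:
def _strip_old_title_from_base_name(base_name, original_title):
    title_slug = re.sub(r'\W+', '-', original_title.lower())
    return base_name.replace('-%s' % title_slug, '')
# e.g. _strip_old_title_from_base_name('img_001-first-title', 'First Title')
# returns 'img_001', so a second title update renames rather than appends.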
|
|
"""The tests for the MQTT device tracker platform."""
import pytest
from homeassistant.components.device_tracker.const import DOMAIN, SOURCE_TYPE_BLUETOOTH
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import async_fire_mqtt_message
@pytest.fixture(autouse=True)
def setup_comp(hass, mqtt_mock):
"""Set up mqtt component."""
pass
async def test_ensure_device_tracker_platform_validation(hass):
"""Test if platform validation was done."""
async def mock_setup_scanner(hass, config, see, discovery_info=None):
"""Check that Qos was added by validation."""
assert "qos" in config
with patch(
"homeassistant.components.mqtt.device_tracker.async_setup_scanner",
autospec=True,
side_effect=mock_setup_scanner,
) as mock_sp:
dev_id = "paulus"
topic = "/location/paulus"
assert await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: topic}}}
)
assert mock_sp.call_count == 1
async def test_new_message(hass, mock_device_tracker_conf):
"""Test new message."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: topic}}}
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == location
async def test_single_level_wildcard_topic(hass, mock_device_tracker_conf):
"""Test single level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/+/paulus"
topic = "/location/room/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == location
async def test_multi_level_wildcard_topic(hass, mock_device_tracker_conf):
"""Test multi level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/#"
topic = "/location/room/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == location
async def test_single_level_wildcard_topic_not_matching(hass, mock_device_tracker_conf):
"""Test not matching single level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/+/paulus"
topic = "/location/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id) is None
async def test_multi_level_wildcard_topic_not_matching(hass, mock_device_tracker_conf):
"""Test not matching multi level wildcard topic."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
subscription = "/location/#"
topic = "/somewhere/room/paulus"
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_PLATFORM: "mqtt", "devices": {dev_id: subscription}}},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id) is None
async def test_matching_custom_payload_for_home_and_not_home(
hass, mock_device_tracker_conf
):
"""Test custom payload_home sets state to home and custom payload_not_home sets state to not_home."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
payload_home = "present"
payload_not_home = "not present"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_PLATFORM: "mqtt",
"devices": {dev_id: topic},
"payload_home": payload_home,
"payload_not_home": payload_not_home,
}
},
)
async_fire_mqtt_message(hass, topic, payload_home)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_HOME
async_fire_mqtt_message(hass, topic, payload_not_home)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_NOT_HOME
async def test_not_matching_custom_payload_for_home_and_not_home(
hass, mock_device_tracker_conf
):
"""Test not matching payload does not set state to home or not_home."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
payload_home = "present"
payload_not_home = "not present"
payload_not_matching = "test"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_PLATFORM: "mqtt",
"devices": {dev_id: topic},
"payload_home": payload_home,
"payload_not_home": payload_not_home,
}
},
)
async_fire_mqtt_message(hass, topic, payload_not_matching)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state != STATE_HOME
assert hass.states.get(entity_id).state != STATE_NOT_HOME
async def test_matching_source_type(hass, mock_device_tracker_conf):
"""Test setting source type."""
dev_id = "paulus"
entity_id = f"{DOMAIN}.{dev_id}"
topic = "/location/paulus"
source_type = SOURCE_TYPE_BLUETOOTH
location = "work"
hass.config.components = {"mqtt", "zone"}
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_PLATFORM: "mqtt",
"devices": {dev_id: topic},
"source_type": source_type,
}
},
)
async_fire_mqtt_message(hass, topic, location)
await hass.async_block_till_done()
assert hass.states.get(entity_id).attributes["source_type"] == SOURCE_TYPE_BLUETOOTH
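# --- Illustrative sketch (not Home Assistant's implementation) ---------------
# The wildcard tests above rely on standard MQTT topic-filter semantics: "+"
# matches exactly one topic level and "#" matches any number of trailing
# levels. A minimal matcher expressing that rule looks like this:
def _topic_matches(subscription, topic):
    sub_parts = subscription.split("/")
    topic_parts = topic.split("/")
    for i, sub_part in enumerate(sub_parts):
        if sub_part == "#":
            return True
        if i >= len(topic_parts) or (sub_part != "+" and sub_part != topic_parts[i]):
            return False
    return len(sub_parts) == len(topic_parts)
# e.g. _topic_matches("/location/+/paulus", "/location/room/paulus") is True,
# while _topic_matches("/location/+/paulus", "/location/paulus") is False.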
|
|
#!/usr/bin/python
#============================ adjust path =====================================
import sys
import os
if __name__ == '__main__':
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..'))
#============================ imports =========================================
import Tkinter
import dustGuiLib
import dustFrame
from dustStyle import dustStyle
from SmartMeshSDK.ApiDefinition import IpMgrDefinition, \
IpMoteDefinition, \
HartMgrDefinition, \
HartMoteDefinition
from SmartMeshSDK.IpMgrConnectorMux import IpMgrConnectorMux
from SmartMeshSDK.IpMgrConnectorSerial import IpMgrConnectorSerial
from SmartMeshSDK.IpMoteConnector import IpMoteConnector
from SmartMeshSDK.HartMgrConnector import HartMgrConnector
from SmartMeshSDK.HartMoteConnector import HartMoteConnector
from SmartMeshSDK.ApiException import ConnectionError, \
CommandError
#============================ body ============================================
class dustFrameConnection(dustFrame.dustFrame):
def __init__(self,parentElem,guiLock,connectCb,frameName="connection",row=0,column=0):
# record variables
self.connectCb = connectCb
# init parent
dustFrame.dustFrame.__init__(self,parentElem,guiLock,frameName,row,column)
# row 0: serial port
self.serialFrame = Tkinter.Frame(self.container,
borderwidth=0,
bg=dustStyle.COLOR_BG)
temp = dustGuiLib.Label(self.serialFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="through serial port:")
self._add(temp,0,0,columnspan=3)
temp = dustGuiLib.Label(self.serialFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="port name:")
self._add(temp,1,0)
self.serialPortText = dustGuiLib.Text(self.serialFrame,
font=dustStyle.FONT_BODY,
width=25,
height=1,
returnAction=self._connectSerial)
self.serialPortText.insert(1.0,"")
self._add(self.serialPortText,1,1)
self.serialButton = dustGuiLib.Button(self.serialFrame,
text='connect',
command=self._connectSerial)
self._add(self.serialButton,1,2)
# row 2: serialMux
self.serialMuxFrame = Tkinter.Frame(self.container,
borderwidth=0,
bg=dustStyle.COLOR_BG)
temp = dustGuiLib.Label(self.serialMuxFrame,
font=dustStyle.FONT_BODY,
text="through serialMux:",
bg=dustStyle.COLOR_BG)
self._add(temp,0,0,columnspan=5)
temp = dustGuiLib.Label(self.serialMuxFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="host:")
self._add(temp,1,0)
self.serialMuxHostText = dustGuiLib.Text(self.serialMuxFrame,
font=dustStyle.FONT_BODY,
width=15,
height=1,
returnAction=self._connectSerialMux)
self.serialMuxHostText.insert(1.0,"127.0.0.1")
self._add(self.serialMuxHostText,1,1)
temp = dustGuiLib.Label(self.serialMuxFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="port:")
self._add(temp,1,2)
self.serialMuxPortText = dustGuiLib.Text(self.serialMuxFrame,
font=dustStyle.FONT_BODY,
width=5,
height=1,
returnAction=self._connectSerialMux)
self.serialMuxPortText.insert(1.0,"9900")
self._add(self.serialMuxPortText,1,3)
self.serialMuxButton = dustGuiLib.Button(self.serialMuxFrame,
text='connect',
command=self._connectSerialMux)
self._add(self.serialMuxButton,1,4)
# row 3: xml
self.xmlFrame = Tkinter.Frame(self.container,borderwidth=0,bg=dustStyle.COLOR_BG)
temp = dustGuiLib.Label(self.xmlFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="through XML-RPC:")
self._add(temp,0,0,columnspan=5)
temp = dustGuiLib.Label(self.xmlFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="host:")
self._add(temp,1,0)
self.xmlHostText = dustGuiLib.Text(self.xmlFrame,
font=dustStyle.FONT_BODY,
width=15,
height=1,
returnAction=self._connectXml)
self.xmlHostText.insert(1.0,"")
self._add(self.xmlHostText,1,1)
temp = dustGuiLib.Label(self.xmlFrame,
font=dustStyle.FONT_BODY,
bg=dustStyle.COLOR_BG,
text="port:")
self._add(temp,1,2)
self.xmlPortText = dustGuiLib.Text(self.xmlFrame,
font=dustStyle.FONT_BODY,
width=5,
height=1,
returnAction=self._connectXml)
self.xmlPortText.insert(1.0,"4445")
self._add(self.xmlPortText,1,3)
self.xmlButton = dustGuiLib.Button(self.xmlFrame,
text='connect',
command=self._connectXml)
self._add(self.xmlButton,1,4)
# row 4: text
self.tipLabel = dustGuiLib.Label(self.container,borderwidth=0,bg=dustStyle.COLOR_BG)
self.guiLock.acquire()
self.tipLabel.grid(row=4,column=0,sticky=Tkinter.W)
self.guiLock.release()
#======================== public ==========================================
def apiLoaded(self,apiDef):
# call the parent's apiLoaded function
dustFrame.dustFrame.apiLoaded(self,apiDef)
# display/hide connection forms
self._showHideConnectionForms()
def updateGuiDisconnected(self):
# update the connection fields
self.guiLock.acquire()
self.serialPortText.configure(bg=dustStyle.COLOR_BG)
self.serialMuxHostText.configure(bg=dustStyle.COLOR_BG)
self.serialMuxPortText.configure(bg=dustStyle.COLOR_BG)
self.xmlHostText.configure(bg=dustStyle.COLOR_BG)
self.xmlPortText.configure(bg=dustStyle.COLOR_BG)
self.tipLabel.configure(text="")
self.guiLock.release()
# update the buttons
self.guiLock.acquire()
self.serialButton.configure(text='connect',command=self._connectSerial)
self.serialMuxButton.configure(text='connect', command=self._connectSerialMux)
self.xmlButton.configure(text='connect', command=self._connectXml)
self.guiLock.release()
# display/hide connection forms
self._showHideConnectionForms()
#======================== private =========================================
def _showHideConnectionForms(self):
self.guiLock.acquire()
if (
isinstance(self.apiDef,IpMoteDefinition.IpMoteDefinition) or
isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition) or
isinstance(self.apiDef,HartMoteDefinition.HartMoteDefinition)
):
self.serialFrame.grid(row=2,column=0,sticky=Tkinter.W)
if (
isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition)
):
self.serialMuxFrame.grid(row=1,column=0,sticky=Tkinter.W)
if (
isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition)
):
self.xmlFrame.grid(row=3,column=0,sticky=Tkinter.W)
self.guiLock.release()
def _connectSerial(self):
'''
\brief Connect through the serial port.
'''
# initialize the connector
try:
if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
self.connector = IpMgrConnectorSerial.IpMgrConnectorSerial()
elif isinstance(self.apiDef,IpMoteDefinition.IpMoteDefinition):
self.connector = IpMoteConnector.IpMoteConnector()
elif isinstance(self.apiDef,HartMoteDefinition.HartMoteDefinition):
self.connector = HartMoteConnector.HartMoteConnector()
else:
raise SystemError
except NotImplementedError as err:
self.guiLock.acquire()
self.tipLabel.configure(text=str(err))
self.guiLock.release()
return
# read connection params from GUI
self.guiLock.acquire()
connectParams = {
'port': self.serialPortText.get(1.0,Tkinter.END).strip(),
}
self.guiLock.release()
# connect to the serial port
try:
self.connector.connect(connectParams)
except ConnectionError as err:
self.guiLock.acquire()
self.serialPortText.configure(bg=dustStyle.COLOR_ERROR)
self.tipLabel.configure(text=str(err))
self.guiLock.release()
return
# if you get here, the connector could connect, i.e. the COM port is available
# make sure that the device attached to the serial port is really the mote we expect
if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
# nothing to do, since connecting to a manager includes a handshake
pass
elif isinstance(self.apiDef,IpMoteDefinition.IpMoteDefinition):
try:
res = self.connector.dn_getParameter_moteInfo()
except (ConnectionError,CommandError) as err:
# disconnect the connector
self.connector.disconnect()
# print error text
output = []
output += ["Could open the COM port, but issuing dn_getParameter_moteInfo() failed."]
output += ["Exact error received: {0}".format(err)]
output += ["Please verify that the device connected to {0} is a SmartMesh IP mote.".format(connectParams['port'])]
output += ["Please verify that the SmartMesh IP mote is configured in slave mode."]
output = '\n'.join(output)
self.guiLock.acquire()
self.serialPortText.configure(bg=dustStyle.COLOR_WARNING_NOTWORKING)
self.tipLabel.configure(text=output)
self.guiLock.release()
return
elif isinstance(self.apiDef,HartMoteDefinition.HartMoteDefinition):
try:
res = self.connector.dn_getParameter_moteInfo()
except (ConnectionError,CommandError) as err:
# disconnect the connector
self.connector.disconnect()
# print error text
output = []
output += ["Could open the COM port, but issuing dn_getParameter_moteInfo() failed."]
output += ["Exact error received: {0}".format(err)]
output += ["Please verify that the device connected to {0} is a SmartMesh WirelessHART mote.".format(connectParams['port'])]
output += ["Please verify that the SmartMesh WirelessHART mote is configured in slave mode."]
output = '\n'.join(output)
self.guiLock.acquire()
self.serialPortText.configure(bg=dustStyle.COLOR_WARNING_NOTWORKING)
self.tipLabel.configure(text=output)
self.guiLock.release()
return
else:
raise SystemError
# if you get here, the connection has succeeded
self.guiLock.acquire()
self.serialPortText.configure(bg=dustStyle.COLOR_NOERROR)
self.tipLabel.configure(text="Connection successful.")
self.guiLock.release()
# hide other connectFrames
self.guiLock.acquire()
self.serialMuxFrame.grid_forget()
self.xmlFrame.grid_forget()
self.guiLock.release()
# update the button
self.guiLock.acquire()
self.serialButton.configure(text='disconnect', command=self._disconnect)
self.guiLock.release()
# common connect routine
self._connect()
def _connectSerialMux(self):
'''
\brief Connect through the serial Mux.
'''
# initialize the connector
try:
if isinstance(self.apiDef,IpMgrDefinition.IpMgrDefinition):
self.connector = IpMgrConnectorMux.IpMgrConnectorMux()
else:
raise SystemError
except NotImplementedError as err:
self.guiLock.acquire()
self.tipLabel.configure(text=str(err))
self.guiLock.release()
return
# read connection params from GUI
self.guiLock.acquire()
connectParams = {
'host': self.serialMuxHostText.get(1.0,Tkinter.END).strip(),
'port': int(self.serialMuxPortText.get(1.0,Tkinter.END).strip()),
}
self.guiLock.release()
# connect
try:
self.connector.connect(connectParams)
except ConnectionError as err:
self.guiLock.acquire()
self.serialMuxHostText.configure(bg=dustStyle.COLOR_ERROR)
self.serialMuxPortText.configure(bg=dustStyle.COLOR_ERROR)
self.tipLabel.configure(text=str(err))
self.guiLock.release()
return
else:
self.guiLock.acquire()
self.serialMuxHostText.configure(bg=dustStyle.COLOR_NOERROR)
self.serialMuxPortText.configure(bg=dustStyle.COLOR_NOERROR)
self.tipLabel.configure(text="Connection successful.")
self.guiLock.release()
# hide other connectFrames
self.guiLock.acquire()
self.serialFrame.grid_forget()
self.xmlFrame.grid_forget()
self.guiLock.release()
# update the button
self.guiLock.acquire()
self.serialMuxButton.configure(text='disconnect', command=self._disconnect)
self.guiLock.release()
# common connect routine
self._connect()
def _connectXml(self):
'''
\brief Connect over XML-RPC.
'''
# initialize the connector
try:
if isinstance(self.apiDef,HartMgrDefinition.HartMgrDefinition):
self.connector = HartMgrConnector.HartMgrConnector()
else:
raise SystemError
except NotImplementedError as err:
self.guiLock.acquire()
self.tipLabel.configure(text=str(err))
self.guiLock.release()
return
# read connection params from GUI
self.guiLock.acquire()
connectParams = {
'host': self.xmlHostText.get(1.0,Tkinter.END).strip(),
'port': int(self.xmlPortText.get(1.0,Tkinter.END).strip()),
}
self.guiLock.release()
# connect
try:
self.connector.connect(connectParams)
except ConnectionError as err:
self.guiLock.acquire()
self.xmlHostText.configure(bg=dustStyle.COLOR_ERROR)
self.xmlPortText.configure(bg=dustStyle.COLOR_ERROR)
self.tipLabel.configure(text=str(err))
self.guiLock.release()
return
else:
self.guiLock.acquire()
self.xmlHostText.configure(bg=dustStyle.COLOR_NOERROR)
self.xmlPortText.configure(bg=dustStyle.COLOR_NOERROR)
self.tipLabel.configure(text="Connection successful.")
self.guiLock.release()
# hide other connectFrames
self.guiLock.acquire()
self.serialFrame.grid_forget()
self.serialMuxFrame.grid_forget()
self.guiLock.release()
# update the button
self.guiLock.acquire()
self.xmlButton.configure(text='disconnect', command=self._disconnect)
self.guiLock.release()
# common connect routine
self._connect()
#======================== helpers =========================================
def _connect(self):
'''
\brief Connect routine common to all connectors.
'''
# call the callback
self.connectCb(self.connector)
def _disconnect(self):
'''
\brief Disconnect routine common to all connectors.
'''
# disconnect the connector from the device
self.connector.disconnect()
#============================ sample app ======================================
# The following gets called only if you run this module as a standalone app, by
# double-clicking on this source file. This code is NOT executed when importing
# this module into a larger application.
#
class exampleApp(object):
def __init__(self):
self.window = dustWindow("dustFrameConnection",
self._closeCb)
self.guiLock = threading.Lock()
self.frame = dustFrameConnection(
self.window,
self.guiLock,
self._connectCb,
row=0,column=0)
self.apidef = IpMoteDefinition.IpMoteDefinition()
self.frame.apiLoaded(self.apidef)
self.frame.show()
self.window.mainloop()
def _closeCb(self):
print " _closeCb called"
def _connectCb(self,param):
print " _connectCb called with param="+str(param)
if __name__ == '__main__':
import threading
from dustWindow import dustWindow
exampleApp()
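# --- Side note (illustrative sketch, not part of the SDK) --------------------
# Every widget update above is wrapped in guiLock.acquire()/release() pairs.
# Because guiLock is a plain threading.Lock, the same guarded-update pattern
# can be written with the lock's context-manager protocol, which releases the
# lock even if configure() raises; a hypothetical helper would look like:
def _update_tip(frame, text):
    with frame.guiLock:
        frame.tipLabel.configure(text=text)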
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ['baidu_download']
from ..common import *
from .embed import *
from .universal import *
def baidu_get_song_data(sid):
data = json.loads(get_html(
'http://music.baidu.com/data/music/fmlink?songIds=%s' % sid, faker=True))['data']
if data['xcode'] != '':
# inside china mainland
return data['songList'][0]
else:
# outside china mainland
return None
def baidu_get_song_url(data):
return data['songLink']
def baidu_get_song_artist(data):
return data['artistName']
def baidu_get_song_album(data):
return data['albumName']
def baidu_get_song_title(data):
return data['songName']
def baidu_get_song_lyric(data):
lrc = data['lrcLink']
return "http://music.baidu.com%s" % lrc if lrc else None
def baidu_download_song(sid, output_dir='.', merge=True, info_only=False):
data = baidu_get_song_data(sid)
if data is not None:
url = baidu_get_song_url(data)
title = baidu_get_song_title(data)
artist = baidu_get_song_artist(data)
album = baidu_get_song_album(data)
lrc = baidu_get_song_lyric(data)
file_name = "%s - %s - %s" % (title, album, artist)
else:
html = get_html("http://music.baidu.com/song/%s" % sid)
url = r1(r'data_url="([^"]+)"', html)
title = r1(r'data_name="([^"]+)"', html)
file_name = title
type, ext, size = url_info(url, faker=True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], file_name, ext, size,
output_dir, merge=merge, faker=True)
try:
type, ext, size = url_info(lrc, faker=True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([lrc], file_name, ext, size, output_dir, faker=True)
except:
pass
def baidu_download_album(aid, output_dir='.', merge=True, info_only=False):
html = get_html('http://music.baidu.com/album/%s' % aid, faker=True)
album_name = r1(r'<h2 class="album-name">(.+?)<\/h2>', html)
artist = r1(r'<span class="author_list" title="(.+?)">', html)
output_dir = '%s/%s - %s' % (output_dir, artist, album_name)
ids = json.loads(r1(r'<span class="album-add" data-adddata=\'(.+?)\'>',
html).replace('"', '').replace(';', '"'))['ids']
track_nr = 1
for id in ids:
song_data = baidu_get_song_data(id)
song_url = baidu_get_song_url(song_data)
song_title = baidu_get_song_title(song_data)
song_lrc = baidu_get_song_lyric(song_data)
file_name = '%02d.%s' % (track_nr, song_title)
type, ext, size = url_info(song_url, faker=True)
print_info(site_info, song_title, type, size)
if not info_only:
download_urls([song_url], file_name, ext, size,
output_dir, merge=merge, faker=True)
if song_lrc:
type, ext, size = url_info(song_lrc, faker=True)
print_info(site_info, song_title, type, size)
if not info_only:
download_urls([song_lrc], file_name, ext,
size, output_dir, faker=True)
track_nr += 1
def baidu_download(url, output_dir='.', stream_type=None, merge=True, info_only=False, **kwargs):
if re.match(r'https?://pan.baidu.com', url):
real_url, title, ext, size = baidu_pan_download(url)
print_info('BaiduPan', title, ext, size)
if not info_only:
print('Hold on...')
time.sleep(5)
download_urls([real_url], title, ext, size,
output_dir, url, merge=merge, faker=True)
elif re.match(r'https?://music.baidu.com/album/\d+', url):
id = r1(r'https?://music.baidu.com/album/(\d+)', url)
baidu_download_album(id, output_dir, merge, info_only)
elif re.match(r'https?://music.baidu.com/song/\d+', url):
id = r1(r'https?://music.baidu.com/song/(\d+)', url)
baidu_download_song(id, output_dir, merge, info_only)
elif re.match(r'https?://tieba.baidu.com/', url):
try:
# embedded videos
embed_download(url, output_dir, merge=merge, info_only=info_only, **kwargs)
except:
# images
html = get_html(url)
title = r1(r'title:"([^"]+)"', html)
vhsrc = re.findall(r'"BDE_Image"[^>]+src="([^"]+\.mp4)"', html) or \
re.findall(r'vhsrc="([^"]+)"', html)
if len(vhsrc) > 0:
ext = 'mp4'
size = url_size(vhsrc[0])
print_info(site_info, title, ext, size)
if not info_only:
download_urls(vhsrc, title, ext, size,
output_dir=output_dir, merge=False)
items = re.findall(
r'//tiebapic.baidu.com/forum/w[^"]+/([^/"]+)', html)
urls = ['http://tiebapic.baidu.com/forum/pic/item/' + i
for i in set(items)]
# handle albums
kw = r1(r'kw=([^&]+)', html) or r1(r"kw:'([^']+)'", html)
tid = r1(r'tid=(\d+)', html) or r1(r"tid:'([^']+)'", html)
album_url = 'http://tieba.baidu.com/photo/g/bw/picture/list?kw=%s&tid=%s&pe=%s' % (kw, tid, 1000)
album_info = json.loads(get_content(album_url))
for i in album_info['data']['pic_list']:
urls.append(
'http://tiebapic.baidu.com/forum/pic/item/' + i['pic_id'] + '.jpg')
ext = 'jpg'
size = float('Inf')
print_info(site_info, title, ext, size)
if not info_only:
download_urls(urls, title, ext, size,
output_dir=output_dir, merge=False)
def baidu_pan_download(url):
errno_patt = r'errno":([^"]+),'
refer_url = ""
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Host': 'pan.baidu.com',
'Origin': 'http://pan.baidu.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36',
'Referer': refer_url
}
if cookies:
print('Use user specified cookies')
else:
print('Generating cookies...')
fake_headers['Cookie'] = baidu_pan_gen_cookies(url)
refer_url = "http://pan.baidu.com"
html = get_content(url, fake_headers, decoded=True)
isprotected = False
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
html)
if sign == None:
if re.findall(r'\baccess-code\b', html):
isprotected = True
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk = baidu_pan_protected_share(
url)
# raise NotImplementedError("Password required!")
if isprotected != True:
raise AssertionError("Share not found or canceled: %s" % url)
if bdstoken == None:
bdstoken = ""
if isprotected != True:
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
html)
request_url = "http://pan.baidu.com/api/sharedownload?sign=%s×tamp=%s&bdstoken=%s&channel=chunlei&clienttype=0&web=1&app_id=%s" % (
sign, timestamp, bdstoken, appid)
refer_url = url
post_data = {
'encrypt': 0,
'product': 'share',
'uk': uk,
'primaryid': primary_id,
'fid_list': '[' + fs_id + ']'
}
if isprotected == True:
post_data['sekey'] = psk
response_content = post_content(request_url, fake_headers, post_data, True)
errno = match1(response_content, errno_patt)
if errno != "0":
raise AssertionError(
"Server refused to provide download link! (Errno:%s)" % errno)
real_url = r1(r'dlink":"([^"]+)"', response_content).replace('\\/', '/')
title = r1(r'server_filename":"([^"]+)"', response_content)
assert real_url
type, ext, size = url_info(real_url, faker=True)
title_wrapped = json.loads('{"wrapper":"%s"}' % title)
title = title_wrapped['wrapper']
logging.debug(real_url)
return real_url, title, ext, size
def baidu_pan_parse(html):
sign_patt = r'sign":"([^"]+)"'
timestamp_patt = r'timestamp":([^"]+),'
appid_patt = r'app_id":"([^"]+)"'
bdstoken_patt = r'bdstoken":"([^"]+)"'
fs_id_patt = r'fs_id":([^"]+),'
uk_patt = r'uk":([^"]+),'
errno_patt = r'errno":([^"]+),'
primary_id_patt = r'shareid":([^"]+),'
sign = match1(html, sign_patt)
timestamp = match1(html, timestamp_patt)
appid = match1(html, appid_patt)
bdstoken = match1(html, bdstoken_patt)
fs_id = match1(html, fs_id_patt)
uk = match1(html, uk_patt)
primary_id = match1(html, primary_id_patt)
return sign, timestamp, bdstoken, appid, primary_id, fs_id, uk
def baidu_pan_gen_cookies(url, post_data=None):
from http import cookiejar
cookiejar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
resp = opener.open('http://pan.baidu.com')
if post_data != None:
resp = opener.open(url, bytes(parse.urlencode(post_data), 'utf-8'))
return cookjar2hdr(cookiejar)
def baidu_pan_protected_share(url):
print('This share is protected by password!')
inpwd = input('Please provide unlock password: ')
inpwd = inpwd.replace(' ', '').replace('\t', '')
print('Please wait...')
post_pwd = {
'pwd': inpwd,
'vcode': None,
'vstr': None
}
from http import cookiejar
import time
cookiejar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
resp = opener.open('http://pan.baidu.com')
resp = opener.open(url)
init_url = resp.geturl()
verify_url = 'http://pan.baidu.com/share/verify?%s&t=%s&channel=chunlei&clienttype=0&web=1' % (
init_url.split('?', 1)[1], int(time.time()))
refer_url = init_url
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Host': 'pan.baidu.com',
'Origin': 'http://pan.baidu.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36',
'Referer': refer_url
}
opener.addheaders = dict2triplet(fake_headers)
pwd_resp = opener.open(verify_url, bytes(
parse.urlencode(post_pwd), 'utf-8'))
pwd_resp_str = ungzip(pwd_resp.read()).decode('utf-8')
pwd_res = json.loads(pwd_resp_str)
if pwd_res['errno'] != 0:
raise AssertionError(
'Server returned an error: %s (Incorrect password?)' % pwd_res['errno'])
pg_resp = opener.open('http://pan.baidu.com/share/link?%s' %
init_url.split('?', 1)[1])
content = ungzip(pg_resp.read()).decode('utf-8')
sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
content)
psk = query_cookiejar(cookiejar, 'BDCLND')
psk = parse.unquote(psk)
fake_headers['Cookie'] = cookjar2hdr(cookiejar)
return sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk
def cookjar2hdr(cookiejar):
cookie_str = ''
for i in cookiejar:
cookie_str = cookie_str + i.name + '=' + i.value + ';'
return cookie_str[:-1]
def query_cookiejar(cookiejar, name):
for i in cookiejar:
if i.name == name:
return i.value
def dict2triplet(dictin):
out_triplet = []
for i in dictin:
out_triplet.append((i, dictin[i]))
return out_triplet
site_info = "Baidu.com"
download = baidu_download
download_playlist = playlist_not_supported("baidu")
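# --- Illustrative usage sketch (not part of the extractor itself) ------------
# you-get normally reaches this module through its common dispatch machinery,
# but calling the entry point directly would look roughly like this (the song
# id below is hypothetical; info_only=True prints metadata without downloading):
if __name__ == '__main__':
    baidu_download('http://music.baidu.com/song/123456789', output_dir='.', info_only=True)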
|
|
"""
The cluster module contains the definitions for retrieving and manipulating
cluster information.
"""
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
from argparse import ArgumentParser
from qds_sdk import util
import logging
import json
log = logging.getLogger("qds_cluster")
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
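# Illustrative only (not part of the qds_sdk API): str2bool is wired up below
# as an argparse `type=` converter for --stable-allow-fallback, mapping the
# usual truthy strings to True and everything else to False.
def _str2bool_examples():
    assert str2bool("Yes") is True
    assert str2bool("t") is True
    assert str2bool("0") is False       # anything outside the truthy set -> False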
class Cluster(Resource):
"""
qds_sdk.Cluster is the class for retrieving and manipulating cluster
information.
"""
rest_entity_path = "clusters"
api_version = "v1.2"
@classmethod
def _parse_list(cls, args):
"""
Parse command line arguments to construct a dictionary of cluster
parameters that can be used to determine which clusters to list.
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used to determine which clusters to list
"""
argparser = ArgumentParser(prog="cluster list")
group = argparser.add_mutually_exclusive_group()
group.add_argument("--id", dest="cluster_id",
help="show cluster with this id")
group.add_argument("--label", dest="label",
help="show cluster with this label")
group.add_argument("--state", dest="state", action="store",
choices=['up', 'down', 'pending', 'terminating'],
help="list only clusters in the given state")
arguments = argparser.parse_args(args)
return vars(arguments)
@classmethod
def list(cls, state=None):
"""
List existing clusters present in your account.
Kwargs:
`state`: list only those clusters which are in this state
Returns:
List of clusters satisfying the given criteria
"""
conn = Qubole.agent()
if state is None:
return conn.get(cls.rest_entity_path)
elif state is not None:
cluster_list = conn.get(cls.rest_entity_path)
result = []
for cluster in cluster_list:
if state.lower() == cluster['cluster']['state'].lower():
result.append(cluster)
return result
@classmethod
def show(cls, cluster_id_label):
"""
Show information about the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent(version=Cluster.api_version)
return conn.get(cls.element_path(cluster_id_label))
@classmethod
def status(cls, cluster_id_label):
"""
Show the status of the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent()
return conn.get(cls.element_path(cluster_id_label) + "/state")
@classmethod
def start(cls, cluster_id_label):
"""
Start the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent()
data = {"state": "start"}
return conn.put(cls.element_path(cluster_id_label) + "/state", data)
@classmethod
def terminate(cls, cluster_id_label):
"""
Terminate the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent()
data = {"state": "terminate"}
return conn.put(cls.element_path(cluster_id_label) + "/state", data)
@classmethod
def _parse_create_update(cls, args, action, api_version):
"""
Parse command line arguments to determine cluster parameters that can
be used to create or update a cluster.
Args:
`args`: sequence of arguments
`action`: "create", "update" or "clone"
Returns:
Object that contains cluster parameters
"""
argparser = ArgumentParser(prog="cluster %s" % action)
create_required = False
label_required = False
if action == "create":
create_required = True
elif action == "update":
argparser.add_argument("cluster_id_label",
help="id/label of the cluster to update")
elif action == "clone":
argparser.add_argument("cluster_id_label",
help="id/label of the cluster to update")
label_required = True
argparser.add_argument("--label", dest="label",
nargs="+", required=(create_required or label_required),
help="list of labels for the cluster" +
" (atleast one label is required)")
ec2_group = argparser.add_argument_group("ec2 settings")
ec2_group.add_argument("--access-key-id",
dest="aws_access_key_id",
help="access key id for customer's aws" +
" account. This is required while" +
" creating the cluster",)
ec2_group.add_argument("--secret-access-key",
dest="aws_secret_access_key",
help="secret access key for customer's aws" +
" account. This is required while" +
" creating the cluster",)
ec2_group.add_argument("--aws-region",
dest="aws_region",
choices=["us-east-1", "us-west-2", "ap-northeast-1", "sa-east-1",
"eu-west-1", "ap-southeast-1", "us-west-1"],
help="aws region to create the cluster in",)
ec2_group.add_argument("--aws-availability-zone",
dest="aws_availability_zone",
help="availability zone to" +
" create the cluster in",)
ec2_group.add_argument("--subnet-id",
dest="subnet_id",
help="subnet to create the cluster in",)
ec2_group.add_argument("--vpc-id",
dest="vpc_id",
help="vpc to create the cluster in",)
ec2_group.add_argument("--master-elastic-ip",
dest="master_elastic_ip",
help="elastic ip to attach to master",)
ec2_group.add_argument("--bastion-node-public-dns",
dest="bastion_node_public_dns",
help="public dns name of the bastion node. Required only if cluster is in private subnet of a EC2-VPC",)
ec2_group.add_argument("--role-instance-profile",
dest="role_instance_profile",
help="IAM Role instance profile to attach on cluster",)
hadoop_group = argparser.add_argument_group("hadoop settings")
node_config_group = argparser.add_argument_group("node configuration") if (api_version >= 1.3) else hadoop_group
node_config_group.add_argument("--master-instance-type",
dest="master_instance_type",
help="instance type to use for the hadoop" +
" master node",)
node_config_group.add_argument("--slave-instance-type",
dest="slave_instance_type",
help="instance type to use for the hadoop" +
" slave nodes",)
node_config_group.add_argument("--initial-nodes",
dest="initial_nodes",
type=int,
help="number of nodes to start the" +
" cluster with",)
node_config_group.add_argument("--max-nodes",
dest="max_nodes",
type=int,
help="maximum number of nodes the cluster" +
" may be auto-scaled up to")
node_config_group.add_argument("--slave-request-type",
dest="slave_request_type",
choices=["ondemand", "spot", "hybrid"],
help="purchasing option for slave instaces",)
hadoop_group.add_argument("--custom-config",
dest="custom_config_file",
help="location of file containg custom" +
" hadoop configuration overrides")
hadoop_group.add_argument("--use-hbase", dest="use_hbase",
action="store_true", default=None,
help="Use hbase on this cluster",)
hadoop_group.add_argument("--is-ha", dest="is_ha",
action="store_true", default=None,
help="Enable HA config for cluster")
if api_version >= 1.3:
qubole_placement_policy_group = hadoop_group.add_mutually_exclusive_group()
qubole_placement_policy_group.add_argument("--use-qubole-placement-policy",
dest="use_qubole_placement_policy",
action="store_true",
default=None,
help="Use Qubole Block Placement policy" +
" for clusters with spot nodes",)
qubole_placement_policy_group.add_argument("--no-use-qubole-placement-policy",
dest="use_qubole_placement_policy",
action="store_false",
default=None,
help="Do not use Qubole Block Placement policy" +
" for clusters with spot nodes",)
fallback_to_ondemand_group = node_config_group.add_mutually_exclusive_group()
fallback_to_ondemand_group.add_argument("--fallback-to-ondemand",
dest="fallback_to_ondemand",
action="store_true",
default=None,
help="Fallback to on-demand nodes if spot nodes" +
" could not be obtained. Valid only if slave_request_type is spot",)
fallback_to_ondemand_group.add_argument("--no-fallback-to-ondemand",
dest="fallback_to_ondemand",
action="store_false",
default=None,
help="Dont Fallback to on-demand nodes if spot nodes" +
" could not be obtained. Valid only if slave_request_type is spot",)
ebs_volume_group = argparser.add_argument_group("ebs volume settings")
ebs_volume_group.add_argument("--ebs-volume-count",
dest="ebs_volume_count",
type=int,
help="Number of EBS volumes to attach to" +
" each instance of the cluster",)
ebs_volume_group.add_argument("--ebs-volume-type",
dest="ebs_volume_type",
choices=["standard", "gp2"],
help=" of the EBS volume. Valid values are " +
"'standard' (magnetic) and 'gp2' (ssd).",)
ebs_volume_group.add_argument("--ebs-volume-size",
dest="ebs_volume_size",
type=int,
help="Size of each EBS volume, in GB",)
hadoop2 = hadoop_group.add_mutually_exclusive_group()
hadoop2.add_argument("--use-hadoop2",
dest="use_hadoop2",
action="store_true",
default=None,
help="Use hadoop2 instead of hadoop1")
hadoop2.add_argument("--use-hadoop1",
dest="use_hadoop2",
action="store_false",
default=None,
help="Use hadoop1 instead of hadoop2. This is the default.")
hadoop2.add_argument("--use-spark",
dest="use_spark",
action="store_true",
default=None,
help="Turn on spark for this cluster")
spot_group = argparser.add_argument_group("spot instance settings" +
" (valid only when slave-request-type is hybrid or spot)")
spot_group.add_argument("--maximum-bid-price-percentage",
dest="maximum_bid_price_percentage",
type=float,
help="maximum value to bid for spot instances" +
" expressed as a percentage of the base" +
" price for the slave node instance type",)
spot_group.add_argument("--timeout-for-spot-request",
dest="timeout_for_request",
type=int,
help="timeout for a spot instance request" +
" unit: minutes")
spot_group.add_argument("--maximum-spot-instance-percentage",
dest="maximum_spot_instance_percentage",
type=int,
help="maximum percentage of instances that may" +
" be purchased from the aws spot market," +
" valid only when slave-request-type" +
" is 'hybrid'",)
stable_spot_group = argparser.add_argument_group("stable spot instance settings")
stable_spot_group.add_argument("--stable-maximum-bid-price-percentage",
dest="stable_maximum_bid_price_percentage",
type=float,
help="maximum value to bid for stable node spot instances" +
" expressed as a percentage of the base" +
" price for the master and slave node instance types",)
stable_spot_group.add_argument("--stable-timeout-for-spot-request",
dest="stable_timeout_for_request",
type=int,
help="timeout for a stable node spot instance request" +
" unit: minutes")
stable_spot_group.add_argument("--stable-allow-fallback",
dest="stable_allow_fallback", default=None,
type=str2bool,
help="whether to fallback to on-demand instances for stable nodes" +
" if spot instances aren't available")
fairscheduler_group = argparser.add_argument_group(
"fairscheduler configuration options")
fairscheduler_group.add_argument("--fairscheduler-config-xml",
dest="fairscheduler_config_xml_file",
help="location for file containing" +
" xml with custom configuration" +
" for the fairscheduler",)
fairscheduler_group.add_argument("--fairscheduler-default-pool",
dest="default_pool",
help="default pool for the" +
" fairscheduler",)
security_group = argparser.add_argument_group("security setttings")
ephemerals = security_group.add_mutually_exclusive_group()
ephemerals.add_argument("--encrypted-ephemerals",
dest="encrypted_ephemerals",
action="store_true",
default=None,
help="encrypt the ephemeral drives on" +
" the instance",)
ephemerals.add_argument("--no-encrypted-ephemerals",
dest="encrypted_ephemerals",
action="store_false",
default=None,
help="don't encrypt the ephemeral drives on" +
" the instance",)
security_group.add_argument("--customer-ssh-key",
dest="customer_ssh_key_file",
help="location for ssh key to use to" +
" login to the instance")
security_group.add_argument("--persistent-security-group",
dest="persistent_security_group",
help="a security group to associate with each" +
" node of the cluster. Typically used" +
" to provide access to external hosts")
presto_group = argparser.add_argument_group("presto settings")
enabling_presto = presto_group.add_mutually_exclusive_group()
enabling_presto.add_argument("--enable-presto",
dest="enable_presto",
action="store_true",
default=None,
help="Enable presto for this cluster",)
enabling_presto.add_argument("--disable-presto",
dest="enable_presto",
action="store_false",
default=None,
help="Disable presto for this cluster",)
presto_group.add_argument("--presto-custom-config",
dest="presto_custom_config_file",
help="location of file containg custom" +
" presto configuration overrides")
termination = argparser.add_mutually_exclusive_group()
termination.add_argument("--disallow-cluster-termination",
dest="disallow_cluster_termination",
action="store_true",
default=None,
help="don't auto-terminate idle clusters," +
" use this with extreme caution",)
termination.add_argument("--allow-cluster-termination",
dest="disallow_cluster_termination",
action="store_false",
default=None,
help="auto-terminate idle clusters,")
ganglia = argparser.add_mutually_exclusive_group()
ganglia.add_argument("--enable-ganglia-monitoring",
dest="enable_ganglia_monitoring",
action="store_true",
default=None,
help="enable ganglia monitoring for the" +
" cluster",)
ganglia.add_argument("--disable-ganglia-monitoring",
dest="enable_ganglia_monitoring",
action="store_false",
default=None,
help="disable ganglia monitoring for the" +
" cluster",)
argparser.add_argument("--node-bootstrap-file",
dest="node_bootstrap_file",
help="""name of the node bootstrap file for this cluster. It
should be stored in S3 at
<account-default-location>/scripts/hadoop/NODE_BOOTSTRAP_FILE
""",)
argparser.add_argument("--custom-ec2-tags",
dest="custom_ec2_tags",
help="""Custom ec2 tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
e.g. --custom-ec2-tags '{"key1":"value1", "key2":"value2"}'
""",)
arguments = argparser.parse_args(args)
return arguments
@classmethod
def create(cls, cluster_info, version=None):
"""
Create a new cluster using information provided in `cluster_info`.
Optionally provide the version (eg: v1.3) to use the new version of the
API. If None we default to v1.2
"""
conn = Qubole.agent(version=version)
return conn.post(cls.rest_entity_path, data=cluster_info)
@classmethod
def update(cls, cluster_id_label, cluster_info, version=None):
"""
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
Optionally provide the version (eg: v1.3) to use the new version of the
API. If None we default to v1.2
"""
conn = Qubole.agent(version=version)
return conn.put(cls.element_path(cluster_id_label), data=cluster_info)
@classmethod
def clone(cls, cluster_id_label, cluster_info, version=None):
"""
Clone the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
Optionally provide the version (eg: v1.3) to use the new version of the
API. If None we default to v1.2
"""
conn = Qubole.agent(version=version)
return conn.post(cls.element_path(cluster_id_label) + '/clone', data=cluster_info)
@classmethod
def _parse_cluster_manage_command(cls, args, action):
"""
Parse command line arguments for cluster manage commands.
"""
argparser = ArgumentParser(prog="cluster_manage_command")
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
if action == "remove" or action == "update":
argparser.add_argument("--private_dns",
help="the private_dns of the machine to be updated/removed", required=True)
if action == "update":
argparser.add_argument("--command",
help="the update command to be executed", required=True, choices=["replace"])
arguments = argparser.parse_args(args)
return arguments
@classmethod
def _parse_reassign_label(cls, args):
"""
Parse command line arguments for reassigning label.
"""
argparser = ArgumentParser(prog="cluster reassign_label")
argparser.add_argument("destination_cluster",
metavar="destination_cluster_id_label",
help="id/label of the cluster to move the label to")
argparser.add_argument("label",
help="label to be moved from the source cluster")
arguments = argparser.parse_args(args)
return arguments
@classmethod
def reassign_label(cls, destination_cluster, label):
"""
Reassign a label from one cluster to another.
Args:
`destination_cluster`: id/label of the cluster to move the label to
`label`: label to be moved from the source cluster
"""
conn = Qubole.agent()
data = {
"destination_cluster": destination_cluster,
"label": label
}
return conn.put(cls.rest_entity_path + "/reassign-label", data)
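# Illustrative sketch (not part of the original module):
#   Cluster.reassign_label("new-cluster-id", "prod-label")
# moves the hypothetical label "prod-label" from whichever cluster currently
# holds it onto the cluster identified by "new-cluster-id".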
@classmethod
def delete(cls, cluster_id_label):
"""
Delete the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent()
return conn.delete(cls.element_path(cluster_id_label))
@classmethod
def _parse_snapshot_restore_command(cls, args, action):
"""
Parse command line arguments for snapshot command.
"""
argparser = ArgumentParser(prog="cluster %s" % action)
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
argparser.add_argument("--s3_location",
help="s3_location where backup is stored", required=True)
if action == "snapshot":
argparser.add_argument("--backup_type",
help="backup_type: full/incremental, default is full")
elif action == "restore_point":
argparser.add_argument("--backup_id",
help="back_id from which restoration will be done", required=True)
argparser.add_argument("--table_names",
help="table(s) which are to be restored", required=True)
argparser.add_argument("--no-overwrite", action="store_false",
help="With this option, restore overwrites to the existing table if theres any in restore target")
argparser.add_argument("--no-automatic", action="store_false",
help="With this option, all the dependencies are automatically restored together with this backup image following the correct order")
arguments = argparser.parse_args(args)
return arguments
@classmethod
def _parse_get_snapshot_schedule(cls, args):
"""
Parse command line arguments for updating hbase snapshot schedule or to get details.
"""
argparser = ArgumentParser(prog="cluster snapshot_schedule")
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
arguments = argparser.parse_args(args)
return arguments
@classmethod
def _parse_update_snapshot_schedule(cls, args):
"""
Parse command line arguments for updating hbase snapshot schedule or to get details.
"""
argparser = ArgumentParser(prog="cluster snapshot_schedule")
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
argparser.add_argument("--frequency-num",
help="frequency number")
argparser.add_argument("--frequency-unit",
help="frequency unit")
argparser.add_argument("--s3-location",
help="s3_location about where to store snapshots")
argparser.add_argument("--status",
help="status of periodic job you want to change to", choices = ["RUNNING", "SUSPENDED"])
arguments = argparser.parse_args(args)
return arguments
@classmethod
def snapshot(cls, cluster_id_label, s3_location, backup_type):
"""
Create a full or incremental hbase snapshot
"""
conn = Qubole.agent()
parameters = {}
parameters['s3_location'] = s3_location
if backup_type:
parameters['backup_type'] = backup_type
return conn.post(cls.element_path(cluster_id_label) + "/snapshots", data=parameters)
@classmethod
def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True):
"""
Restore the cluster from a given hbase backup id
"""
conn = Qubole.agent()
parameters = {}
parameters['s3_location'] = s3_location
parameters['backup_id'] = backup_id
parameters['table_names'] = table_names
parameters['overwrite'] = overwrite
parameters['automatic'] = automatic
return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
@classmethod
def get_snapshot_schedule(cls, cluster_id_label):
"""
Get details for snapshot schedule
"""
conn = Qubole.agent()
return conn.get(cls.element_path(cluster_id_label) + "/snapshot_schedule")
@classmethod
def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None, frequency_num=None, status=None):
"""
Update the snapshot schedule
"""
conn = Qubole.agent()
data = {}
if s3_location is not None:
data["s3_location"] = s3_location
if frequency_unit is not None:
data["frequency_unit"] = frequency_unit
if frequency_num is not None:
data["frequency_num"] = frequency_num
if status is not None:
data["status"] = status
return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data)
@classmethod
def add_node(cls, cluster_id_label, parameters=None):
"""
Add a node to an existing cluster
"""
conn = Qubole.agent()
parameters = {} if not parameters else parameters
return conn.post(cls.element_path(cluster_id_label) + "/nodes", data={"parameters" : parameters})
@classmethod
def remove_node(cls, cluster_id_label, private_dns, parameters=None):
"""
Remove a node from an existing cluster
"""
conn = Qubole.agent()
parameters = {} if not parameters else parameters
data = {"private_dns" : private_dns, "parameters" : parameters}
return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data)
@classmethod
def update_node(cls, cluster_id_label, command, private_dns, parameters=None):
"""
Update a node of an existing cluster
"""
conn = Qubole.agent()
parameters = {} if not parameters else parameters
data = {"command" : command, "private_dns" : private_dns, "parameters" : parameters}
return conn.put(cls.element_path(cluster_id_label) + "/nodes", data)
class ClusterInfo():
"""
qds_sdk.ClusterInfo is the class which stores information about a cluster.
You can use objects of this class to create or update a cluster.
"""
def __init__(self, label, aws_access_key_id, aws_secret_access_key,
disallow_cluster_termination=None,
enable_ganglia_monitoring=None,
node_bootstrap_file=None):
"""
Args:
`label`: A list of labels that identify the cluster. At least one label
must be provided when creating a cluster.
`aws_access_key_id`: The access key id for customer's aws account. This
is required for creating the cluster.
`aws_secret_access_key`: The secret access key for customer's aws
account. This is required for creating the cluster.
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap_file`: name of the node bootstrap file for this
cluster. It should be stored in S3 at
<your-default-location>/scripts/hadoop/
"""
self.label = label
self.ec2_settings = {}
self.ec2_settings['compute_access_key'] = aws_access_key_id
self.ec2_settings['compute_secret_key'] = aws_secret_access_key
self.disallow_cluster_termination = disallow_cluster_termination
self.enable_ganglia_monitoring = enable_ganglia_monitoring
self.node_bootstrap_file = node_bootstrap_file
self.hadoop_settings = {}
self.security_settings = {}
self.presto_settings = {}
def set_ec2_settings(self,
aws_region=None,
aws_availability_zone=None,
vpc_id=None,
subnet_id=None,
master_elastic_ip=None,
role_instance_profile=None,
bastion_node_public_dns=None):
"""
Kwargs:
`aws_region`: AWS region to create the cluster in.
`aws_availability_zone`: The availability zone to create the cluster
in.
`vpc_id`: The vpc to create the cluster in.
`subnet_id`: The subnet to create the cluster in.
`bastion_node_public_dns`: Public dns name of the bastion host. Required only if
cluster is in private subnet.
"""
self.ec2_settings['aws_region'] = aws_region
self.ec2_settings['aws_preferred_availability_zone'] = aws_availability_zone
self.ec2_settings['vpc_id'] = vpc_id
self.ec2_settings['subnet_id'] = subnet_id
self.ec2_settings['role_instance_profile'] = role_instance_profile
self.ec2_settings['master_elastic_ip'] = master_elastic_ip
self.ec2_settings['bastion_node_public_dns'] = bastion_node_public_dns
def set_hadoop_settings(self, master_instance_type=None,
slave_instance_type=None,
initial_nodes=None,
max_nodes=None,
custom_config=None,
slave_request_type=None,
use_hbase=None,
custom_ec2_tags=None,
use_hadoop2=None,
use_spark=None,
is_ha=None):
"""
Kwargs:
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`initial_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`custom_config`: Custom Hadoop configuration overrides.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`use_hbase`: Start hbase daemons on the cluster. Uses Hadoop2
`use_hadoop2`: Use hadoop2 in this cluster
`use_spark`: Use spark in this cluster
`is_ha` : enable HA config for cluster
"""
self.hadoop_settings['master_instance_type'] = master_instance_type
self.hadoop_settings['slave_instance_type'] = slave_instance_type
self.hadoop_settings['initial_nodes'] = initial_nodes
self.hadoop_settings['max_nodes'] = max_nodes
self.hadoop_settings['custom_config'] = custom_config
self.hadoop_settings['slave_request_type'] = slave_request_type
self.hadoop_settings['use_hbase'] = use_hbase
self.hadoop_settings['use_hadoop2'] = use_hadoop2
self.hadoop_settings['use_spark'] = use_spark
self.hadoop_settings['is_ha'] = is_ha
if custom_ec2_tags and custom_ec2_tags.strip():
try:
self.hadoop_settings['custom_ec2_tags'] = json.loads(custom_ec2_tags.strip())
except Exception as e:
raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message)
def set_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None):
"""
Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
"""
self.hadoop_settings['spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'maximum_spot_instance_percentage': maximum_spot_instance_percentage}
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback}
def set_fairscheduler_settings(self, fairscheduler_config_xml=None,
default_pool=None):
"""
Fair scheduler configuration options.
`fairscheduler_config_xml`: XML string with custom configuration
parameters for the fair scheduler.
`default_pool`: The default pool for the fair scheduler.
"""
self.hadoop_settings['fairscheduler_settings'] = {
'fairscheduler_config_xml': fairscheduler_config_xml,
'default_pool': default_pool}
def set_security_settings(self,
encrypted_ephemerals=None,
customer_ssh_key=None,
persistent_security_group=None):
"""
Kwargs:
`encrypted_ephemerals`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
"""
self.security_settings['encrypted_ephemerals'] = encrypted_ephemerals
self.security_settings['customer_ssh_key'] = customer_ssh_key
self.security_settings['persistent_security_group'] = persistent_security_group
def set_presto_settings(self, enable_presto=None, presto_custom_config=None):
"""
Kwargs:
`enable_presto`: Enable Presto on the cluster.
`presto_custom_config`: Custom Presto configuration overrides.
"""
self.presto_settings['enable_presto'] = enable_presto
self.presto_settings['custom_config'] = presto_custom_config
def minimal_payload(self):
"""
This method can be used to create the payload which is sent while
creating or updating a cluster.
"""
payload = {"cluster": self.__dict__}
return util._make_minimal(payload)
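def _example_create_cluster_v12():
    """
    Illustrative sketch (not part of the original module) of how ClusterInfo and
    the create classmethod above are typically combined for api version 1.2. The
    enclosing cluster class is assumed to be qds_sdk's Cluster; the label, keys
    and instance types below are hypothetical placeholders.
    """
    cluster_info = ClusterInfo(label=["my-cluster"],
                               aws_access_key_id="ACCESS_KEY",
                               aws_secret_access_key="SECRET_KEY",
                               disallow_cluster_termination=False)
    cluster_info.set_ec2_settings(aws_region="us-east-1")
    cluster_info.set_hadoop_settings(master_instance_type="m1.large",
                                     slave_instance_type="m1.xlarge",
                                     initial_nodes=2, max_nodes=10,
                                     slave_request_type="spot")
    # minimal_payload() strips unset (None) fields before sending the request.
    return Cluster.create(cluster_info.minimal_payload())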
class ClusterInfoV13():
"""
qds_sdk.ClusterInfoV13 is the class which stores information about a cluster for api version 1.3 and above.
You can use objects of this class to create or update a cluster.
"""
def __init__(self, label, api_version=1.3):
"""
Args:
`label`: A list of labels that identify the cluster. At least one label
must be provided when creating a cluster.
`api_version`: api version to use
"""
self.label = label
self.api_version = api_version
self.ec2_settings = {}
self.hadoop_settings = {}
self.security_settings = {}
self.presto_settings = {}
self.node_configuration = {}
def set_cluster_info(self, aws_access_key_id=None,
aws_secret_access_key=None,
aws_region=None,
aws_availability_zone=None,
vpc_id=None,
subnet_id=None,
master_elastic_ip=None,
disallow_cluster_termination=None,
enable_ganglia_monitoring=None,
node_bootstrap_file=None,
master_instance_type=None,
slave_instance_type=None,
initial_nodes=None,
max_nodes=None,
slave_request_type=None,
fallback_to_ondemand=None,
custom_config=None,
use_hbase=None,
custom_ec2_tags=None,
use_hadoop2=None,
use_spark=None,
use_qubole_placement_policy=None,
maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None,
stable_maximum_bid_price_percentage=None,
stable_timeout_for_request=None,
stable_allow_fallback=True,
ebs_volume_count=None,
ebs_volume_type=None,
ebs_volume_size=None,
fairscheduler_config_xml=None,
default_pool=None,
encrypted_ephemerals=None,
ssh_public_key=None,
persistent_security_group=None,
enable_presto=None,
bastion_node_public_dns=None,
role_instance_profile=None,
presto_custom_config=None,
is_ha=None):
"""
Kwargs:
`aws_access_key_id`: The access key id for customer's aws account. This
is required for creating the cluster.
`aws_secret_access_key`: The secret access key for customer's aws
account. This is required for creating the cluster.
`aws_region`: AWS region to create the cluster in.
`aws_availability_zone`: The availability zone to create the cluster
in.
`vpc_id`: The vpc to create the cluster in.
`subnet_id`: The subnet to create the cluster in.
`master_elastic_ip`: Elastic IP to attach to master node
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap_file`: name of the node bootstrap file for this
cluster. It should be stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`initial_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`custom_config`: Custom Hadoop configuration overrides.
`use_hbase`: Start hbase daemons on the cluster. Uses Hadoop2
`use_hadoop2`: Use hadoop2 in this cluster
`use_spark`: Use spark in this cluster
`use_qubole_placement_policy`: Use Qubole Block Placement policy for
clusters with spot nodes.
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_allow_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`ebs_volume_count`: Number of EBS volumes to attach
to each instance of the cluster.
`ebs_volume_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'gp2' (ssd).
`ebs_volume_size`: Size of each EBS volume, in GB.
`fairscheduler_config_xml`: XML string with custom configuration
parameters for the fair scheduler.
`default_pool`: The default pool for the fair scheduler.
`encrypted_ephemerals`: Encrypt the ephemeral drives on the instance.
`ssh_public_key`: SSH key to use to login to the instances.
`persistent_security_group`: Comma-separated list of persistent
security groups for the cluster.
`enable_presto`: Enable Presto on the cluster.
`presto_custom_config`: Custom Presto configuration overrides.
`bastion_node_public_dns`: Public dns name of the bastion node. Required only if cluster is in private subnet.
`is_ha`: Enable HA config for the cluster
"""
self.disallow_cluster_termination = disallow_cluster_termination
self.enable_ganglia_monitoring = enable_ganglia_monitoring
self.node_bootstrap_file = node_bootstrap_file
self.set_node_configuration(master_instance_type, slave_instance_type, initial_nodes, max_nodes, slave_request_type, fallback_to_ondemand)
self.set_ec2_settings(aws_access_key_id, aws_secret_access_key, aws_region, aws_availability_zone, vpc_id, subnet_id,
master_elastic_ip, bastion_node_public_dns, role_instance_profile)
self.set_hadoop_settings(custom_config, use_hbase, custom_ec2_tags, use_hadoop2, use_spark, use_qubole_placement_policy, is_ha)
self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
self.set_stable_spot_instance_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_allow_fallback)
self.set_ebs_volume_settings(ebs_volume_count, ebs_volume_type, ebs_volume_size)
self.set_fairscheduler_settings(fairscheduler_config_xml, default_pool)
self.set_security_settings(encrypted_ephemerals, ssh_public_key, persistent_security_group)
self.set_presto_settings(enable_presto, presto_custom_config)
def set_ec2_settings(self,
aws_access_key_id=None,
aws_secret_access_key=None,
aws_region=None,
aws_availability_zone=None,
vpc_id=None,
subnet_id=None,
master_elastic_ip=None,
bastion_node_public_dns=None,
role_instance_profile=None):
self.ec2_settings['compute_access_key'] = aws_access_key_id
self.ec2_settings['compute_secret_key'] = aws_secret_access_key
self.ec2_settings['aws_region'] = aws_region
self.ec2_settings['aws_preferred_availability_zone'] = aws_availability_zone
self.ec2_settings['vpc_id'] = vpc_id
self.ec2_settings['subnet_id'] = subnet_id
self.ec2_settings['master_elastic_ip'] = master_elastic_ip
self.ec2_settings['bastion_node_public_dns'] = bastion_node_public_dns
self.ec2_settings['role_instance_profile'] = role_instance_profile
def set_node_configuration(self, master_instance_type=None,
slave_instance_type=None,
initial_nodes=None,
max_nodes=None,
slave_request_type=None,
fallback_to_ondemand=None):
self.node_configuration['master_instance_type'] = master_instance_type
self.node_configuration['slave_instance_type'] = slave_instance_type
self.node_configuration['initial_nodes'] = initial_nodes
self.node_configuration['max_nodes'] = max_nodes
self.node_configuration['slave_request_type'] = slave_request_type
self.node_configuration['fallback_to_ondemand'] = fallback_to_ondemand
def set_hadoop_settings(self, custom_config=None,
use_hbase=None,
custom_ec2_tags=None,
use_hadoop2=None,
use_spark=None,
use_qubole_placement_policy=None,
is_ha=None):
self.hadoop_settings['custom_config'] = custom_config
self.hadoop_settings['use_hbase'] = use_hbase
self.hadoop_settings['use_hadoop2'] = use_hadoop2
self.hadoop_settings['use_spark'] = use_spark
self.hadoop_settings['use_qubole_placement_policy'] = use_qubole_placement_policy
self.hadoop_settings['is_ha'] = is_ha
if custom_ec2_tags and custom_ec2_tags.strip():
try:
self.hadoop_settings['custom_ec2_tags'] = json.loads(custom_ec2_tags.strip())
except Exception as e:
raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message)
def set_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None):
self.node_configuration['spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'maximum_spot_instance_percentage': maximum_spot_instance_percentage}
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
self.node_configuration['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback}
def set_ebs_volume_settings(self, ebs_volume_count=None,
ebs_volume_type=None,
ebs_volume_size=None):
self.node_configuration['ebs_volume_count'] = ebs_volume_count
self.node_configuration['ebs_volume_type'] = ebs_volume_type
self.node_configuration['ebs_volume_size'] = ebs_volume_size
def set_fairscheduler_settings(self, fairscheduler_config_xml=None,
default_pool=None):
self.hadoop_settings['fairscheduler_settings'] = {
'fairscheduler_config_xml': fairscheduler_config_xml,
'default_pool': default_pool}
def set_security_settings(self,
encrypted_ephemerals=None,
ssh_public_key=None,
persistent_security_group=None):
self.security_settings['encrypted_ephemerals'] = encrypted_ephemerals
self.security_settings['ssh_public_key'] = ssh_public_key
self.security_settings['persistent_security_group'] = persistent_security_group
def set_presto_settings(self, enable_presto=None, presto_custom_config=None):
self.presto_settings['enable_presto'] = enable_presto
self.presto_settings['custom_config'] = presto_custom_config
def minimal_payload(self):
"""
This method can be used to create the payload which is sent while
creating or updating a cluster.
"""
payload_dict = self.__dict__
payload_dict.pop("api_version", None)
return util._make_minimal(payload_dict)
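def _example_create_cluster_v13():
    """
    Illustrative sketch (not part of the original module) of the api version 1.3
    flow using ClusterInfoV13.set_cluster_info. All concrete values below are
    hypothetical placeholders; the enclosing cluster class is assumed to be
    qds_sdk's Cluster.
    """
    cluster_info = ClusterInfoV13(label=["my-cluster"], api_version=1.3)
    cluster_info.set_cluster_info(aws_access_key_id="ACCESS_KEY",
                                  aws_secret_access_key="SECRET_KEY",
                                  aws_region="us-east-1",
                                  master_instance_type="m3.xlarge",
                                  slave_instance_type="m3.xlarge",
                                  initial_nodes=2, max_nodes=10,
                                  slave_request_type="spot",
                                  fallback_to_ondemand=True,
                                  ebs_volume_count=1,
                                  ebs_volume_type="gp2",
                                  ebs_volume_size=100)
    # minimal_payload() drops the api_version field and all unset (None) values.
    return Cluster.create(cluster_info.minimal_payload(), version="v1.3")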
|
|
import cStringIO, struct, socket
###
from rtypes import *
from misc import FunctionMapper
from rexceptions import RResponseError, REvalError
from taggedContainers import TaggedList, asTaggedArray, asAttrArray
DEBUG = False
class Lexeme(list):
def __init__(self, rTypeCode, length, hasAttr, lexpos):
list.__init__(self, [rTypeCode, length, hasAttr, lexpos])
self.rTypeCode = rTypeCode
self.length = length
self.hasAttr = hasAttr
self.lexpos = lexpos
self.attrLexeme = None
def setAttr(self, attrLexeme):
self.attrLexeme = attrLexeme
@property
def attr(self):
return self.attrLexeme.data if self.attrLexeme else None
@property
def attrLength(self):
return self.attrLexeme.length
@property
def attrTypeCode(self):
return self.attrLexeme.rTypeCode
@property
def dataLength(self):
'Return length (in bytes) of actual REXPR data body'
if self.hasAttr:
if not self.attrLexeme:
raise RuntimeError('Attribute lexeme not yet set')
return self.length - self.attrLength - 4 # also subtract size of REXP header=4
else:
return self.length
def __str__(self):
return 'Typecode: %s Length: %s hasAttr: %s, Lexpos: %d' % \
(hex(self.rTypeCode), self.length, self.hasAttr, self.lexpos)
class EndOfDataError(RserveError):
pass
class Lexer(object):
#
lexerMap = {}
fmap = FunctionMapper(lexerMap)
#
def __init__(self, src):
'''
@param src: Either a string, a file object, a socket - all providing valid binary r data
'''
try:
# this only works for objects implementing the buffer protocol, e.g. strings, arrays, ...
self.fp = cStringIO.StringIO(src)
except TypeError:
if isinstance(src, socket._socketobject):
self.fp = src.makefile()
else:
self.fp = src
def readHeader(self):
'''
Called initially when reading fresh data from an input source (file or socket).
Reads the header, which contains data like the response/error code and the size of the entire data package.
'''
self.lexpos = 0
# First three bytes encode a 24bit response code; append an additional zero byte and convert it:
self.responseCode = struct.unpack('<i', self.read(3) + '\x00')[0]
if self.responseCode == RESP_OK:
self.responseOK = True
elif self.responseCode == RESP_ERR:
self.responseOK = False
else:
self.clearSocketData()
raise ValueError('Received illegal response code (%x)' % self.responseCode)
self.errCode = self.__unpack(XT_BYTE)
self.messageSize = self.__unpack(XT_INT)
self.read(8) # read additional 8 bytes from header -- CLARIFY THIS!!
if DEBUG:
print 'response ok? %s (responseCode=%x), error-code: %x, message size: %d' % \
(self.responseOK, self.responseCode, self.errCode, self.messageSize)
return self.messageSize
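# Note (added for clarity, based on the reads performed above): readHeader()
# consumes 16 bytes in total -- a 3-byte response code, a 1-byte error code,
# a 4-byte message size, plus 8 further header bytes that are read but not
# interpreted here. messageSize appears to describe the payload that follows
# this header.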
def clearSocketData(self):
'''
If for any reason the parsing process returns an error, make sure that all data from
a socket is removed to avoid data pollution with further parsing attempts.
This should only be called after self.readHeader() has been executed.
'''
if not hasattr(self.fp, '_sock'):
# probably not a socket. Nothing to do here.
return
self.fp._sock.setblocking(0)
try:
while 1:
self.fp.read(SOCKET_BLOCK_SIZE)
except:
pass
finally:
self.fp._sock.setblocking(1)
def read(self, length):
'''
Reads the given number of bytes from the input data source (file or socket). If the
end of data is reached it raises EndOfDataError().
'''
if length==0:
# this can happen if an empty string is read from the data source
data = ''
else:
self.lexpos += length
data = self.fp.read(length)
if len(data) == 0:
raise EndOfDataError()
return data
def __unpack(self, tCode, num=None):
'''
Reads 'num' (atomic) data items from the input source and converts them into a list
of python objects. Byteswapping for numeric data will be done.
'''
structCode = structMap[tCode] if type(tCode)==int else tCode
# All data from Rserve is stored in little-endian format!
fmt = '<' + str(num) + structCode if (num is not None) else '<' + structCode
if tCode == XT_INT3:
length = 3
rawData = self.read(length) + '\x00'
else:
length = struct.calcsize(fmt or 1)
rawData = self.read(length)
d = struct.unpack(fmt, rawData)
return d[0] if num is None else list(d)
def nextExprHdr(self):
'''
From the input file/socket determine the type of the next data item, and its length.
This method can be applied to read either
- the entire data header (containing one of the DT_* codes), or
- an REXPR header
'''
startLexpos = self.lexpos
_rTypeCode = self.__unpack('B') # unsigned byte!
rTypeCode = _rTypeCode & (0xFF - XT_HAS_ATTR) # remove XT_HAS_ATTR flag (if it exists)
hasAttr = (_rTypeCode & XT_HAS_ATTR) != 0 # extract XT_HAS_ATTR flag (if it exists)
length = self.__unpack(XT_INT3)
if not rTypeCode in VALID_R_TYPES:
raise RParserError("Invalid token %s found at lexpos %d, length %d" %
(hex(rTypeCode), startLexpos, length))
return Lexeme(rTypeCode, length, hasAttr, startLexpos)
def nextExprData(self, lexeme):
'''
Reads next data item from binary r data and transforms it into a python object.
'''
return self.lexerMap[lexeme.rTypeCode](self, lexeme)
####################################################################################
@fmap(XT_INT, XT_DOUBLE)
def xt_atom(self, lexeme):
raw = self.read(lexeme.dataLength)
return struct.unpack('<%s' % structMap[lexeme.rTypeCode], raw)[0]
@fmap(XT_BOOL)
def xt_bool(self, lexeme):
raw = self.read(lexeme.dataLength)
# a boolean is stored in a 4 bytes word, but only the first byte is significant:
b = struct.unpack('<%s' % structMap[XT_BOOL], raw[0])[0]
# b can be 2, meaning NA. Otherwise transform 0/1 into False/True
return None if b==2 else b==1
@fmap(XT_ARRAY_INT, XT_ARRAY_DOUBLE, XT_ARRAY_CPLX)
def xt_array_numeric(self, lexeme):
raw = self.read(lexeme.dataLength)
# TODO: swapping...
data = numpy.fromstring(raw, dtype=numpyMap[lexeme.rTypeCode])
return data
@fmap(XT_ARRAY_BOOL)
def xt_array_bool(self, lexeme):
"""A boolean array consists of a 4-byte word (i.e. integer) determining the number of boolean values
in the following dataLength-4 bytes.
E.g. a bool array of one TRUE item looks like:
01 00 00 00 01 ff ff ff
The first 01 value tells that there is one bool value in the array.
The other 01 is the TRUE value, the other 3 'ff's are padding bytes. Those will be used if the vector
has 2,3 or 4 boolean values. For a fifth value another 4 bytes are appended.
"""
numBools = self.__unpack(XT_INT, 1)[0]
raw = self.read(lexeme.dataLength-4) # read the actual boolean values, including padding bytes
data = numpy.fromstring(raw[:numBools], dtype=numpyMap[lexeme.rTypeCode])
return data
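# Worked example (added for illustration, following the layout described in the
# docstring above): a bool array holding [True, False, True] would arrive as
#   03 00 00 00  01 00 01 ff
# i.e. a 4-byte count of 3, the three boolean bytes, and one 0xff padding byte
# rounding the values up to a multiple of 4 bytes.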
@fmap(XT_ARRAY_STR)
def xt_array_str(self, lexeme):
'''
An array of one or more null-terminated strings.
The XT_ARRAY_STR can contain trailing chars \x01 which need to be chopped off.
'''
if lexeme.dataLength == 0:
return ''
raw = self.read(lexeme.dataLength)
data = raw.split('\0')[:-1]
return numpy.array(data)
@fmap(XT_STR, XT_SYMNAME)
def xt_symname(self, lexeme):
'''
A null-terminated string.
Its length can be larger than the actual string; it is always a multiple of 4.
The rest is filled with trailing \0s which need to be chopped off.
'''
raw = self.read(lexeme.dataLength)
return raw.split('\0')[0]
@fmap(XT_NULL)
def xt_null(self, lexeme):
return None
@fmap(XT_UNKNOWN)
def xt_unknown(self, lexeme):
return self.__unpack(XT_INT)
@fmap(XT_RAW)
def xt_raw(self, lexeme):
self.__unpack(XT_INT)
return self.read(lexeme.dataLength - 4)
class RParser(object):
#
parserMap = {}
fmap = FunctionMapper(parserMap)
#
def __init__(self, src, atomicArray):
'''
@param atomicArray: if False parsing arrays with only one element will just return this element
'''
self.lexer = Lexer(src)
self.atomicArray = atomicArray
def __getitem__(self, key):
return self.parserMap[key]
def __getattr__(self, attr):
if attr in ['messageSize']:
return getattr(self.lexer, attr)
else:
raise AttributeError(attr)
@property
def __ind(self):
# return string with number of spaces appropriate for current indentation level
return self.indentLevel*4*' '
def _debugLog(self, lexeme, isRexpr=True):
if DEBUG:
l = lexeme
typeCodeDict = XTs if isRexpr else DTs
print '%s %s (%s), hasAttr=%s, lexpos=%d, length=%s' % \
(self.__ind, typeCodeDict[l.rTypeCode], hex(l.rTypeCode),
l.hasAttr, l.lexpos, l.length)
def parse(self):
'''
@brief parse data stream and return result converted into python data structure
'''
self.indentLevel = 1
self.lexer.readHeader()
if self.lexer.messageSize > 0:
try:
return self._parse()
except:
# If any error is raised during lexing and parsing, make sure that the entire data
# is read from the input source if it is a socket, otherwise following attempts to
# parse again from a socket will return polluted data:
self.lexer.clearSocketData()
raise
elif not self.lexer.responseOK:
try:
rserve_err_msg = ERRORS[self.lexer.errCode]
except KeyError:
raise REvalError("R evaluation error (code=%d)" % self.lexer.errCode)
else:
raise RResponseError('Response error %s (error code=%d)' %
(rserve_err_msg, self.lexer.errCode))
def _parse(self):
dataLexeme = self.lexer.nextExprHdr()
self._debugLog(dataLexeme, isRexpr=False)
if dataLexeme.rTypeCode == DT_SEXP:
return self._stripArray(self._parseExpr().data)
else:
raise NotImplementedError()
def _parseExpr(self):
self.indentLevel += 1
lexeme = self.lexer.nextExprHdr()
self._debugLog(lexeme)
if lexeme.hasAttr:
self.indentLevel += 1
if DEBUG:
print '%s Attribute:' % self.__ind
lexeme.setAttr(self._parseExpr())
self.indentLevel -= 1
lexeme.data = self.parserMap.get(lexeme.rTypeCode, self[None])(self, lexeme)
self.indentLevel -= 1
return lexeme
def _nextExprData(self, lexeme):
lexpos = self.lexer.lexpos
data = self.lexer.nextExprData(lexeme)
if DEBUG:
print '%s data-lexpos: %d, data-length: %d' % (self.__ind, lexpos, lexeme.dataLength)
print '%s data: %s' % (self.__ind, repr(data))
return data
def _stripArray(self, data):
# if data is a plain numpy array, and has only one element, just extract and return this
if data.__class__ == numpy.ndarray and len(data) == 1 and not self.atomicArray:
# if requested, return singular element of numpy-array.
# this does not apply for arrays with attributes (__class__ would be TaggedArray)!
data = data[0]
return data
@fmap(None)
def xt_(self, lexeme):
'apply this for atomic data'
return self._nextExprData(lexeme)
@fmap(XT_ARRAY_BOOL, XT_ARRAY_INT, XT_ARRAY_DOUBLE, XT_ARRAY_STR)
def xt_array(self, lexeme):
data = self._nextExprData(lexeme) # converts lexeme into a numpy array
if lexeme.hasAttr and lexeme.attrTypeCode == XT_LIST_TAG:
for tag, value in lexeme.attr:
if tag == 'dim':
# the array has a defined shape
data.shape = value
elif tag == 'names':
# convert numpy-vector 'value' into list to make TaggedArray work properly:
data = asTaggedArray(data, list(value))
else:
# there are additional tags in the attribute, just collect them in a dictionary
# attached to the array.
try:
data.attr[tag] = value
except AttributeError:
data = asAttrArray(data, {tag: value})
return data
@fmap(XT_VECTOR, XT_LANG_NOTAG, XT_LIST_NOTAG)
def xt_vector(self, lexeme):
'''
A vector is returned e.g. when sending "list('abc','def')" to R. It can contain mixed
types of data items.
The binary representation of an XT_VECTOR is weird: a vector contains an unknown number
of items, with possibly variable length.
The end of this REXP can only be detected by keeping track of how many bytes
have been consumed (lexeme.length!) until the end of the REXP has been reached.
'''
finalLexpos = self.lexer.lexpos + lexeme.dataLength
if DEBUG:
print '%s Vector-lexpos: %d, length %d, finished at: %d' % \
(self.__ind, self.lexer.lexpos, lexeme.dataLength, finalLexpos)
data = []
while self.lexer.lexpos < finalLexpos:
# convert single item arrays into atoms (via stripArray)
data.append(self._stripArray(self._parseExpr().data))
if lexeme.hasAttr and lexeme.attrTypeCode == XT_LIST_TAG:
for tag, value in lexeme.attr:
if tag == 'names':
# the vector has named items
data = TaggedList(zip(value, data))
else:
if DEBUG:
print 'Warning: applying LIST_TAG "%s" on xt_vector not yet implemented' % tag
return data
@fmap(XT_LIST_TAG, XT_LANG_TAG)
def xt_list_tag(self, lexeme):
# an xt_list_tag usually occurs as an attribute of a vector or list (like for a tagged list)
finalLexpos = self.lexer.lexpos + lexeme.dataLength
r = []
while self.lexer.lexpos < finalLexpos:
value, tag = self._parseExpr().data, self._parseExpr().data
# reverse order of tag and value when adding it to result list
r.append((tag, value))
return r
@fmap(XT_CLOS)
def xt_closure(self, lexeme):
# read entire data provided for closure even though we don't know what to do with
# it on the Python side ;-)
aList1 = self._parseExpr().data
aList2 = self._parseExpr().data
# Some closures seem to provide their sourcecode in an attrLexeme, but some don't.
#return Closure(lexeme.attrLexeme.data[0][1])
# So for now let's just return the entire parse tree in a Closure class.
return Closure(lexeme, aList1, aList2)
########################################################################################
def rparse(src, atomicArray=False):
rparser = RParser(src, atomicArray)
return rparser.parse()
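def _example_rparse_usage(binary_r_data):
    """
    Illustrative sketch (not part of the original module): rparse() accepts any
    source understood by Lexer -- a string of binary Rserve data, a file-like
    object or a socket. `binary_r_data` is assumed to be a complete Rserve
    response (header included), e.g. as captured from an Rserve connection.
    """
    # With atomicArray=False (the default), single-element arrays are unwrapped
    # into plain scalars by _stripArray().
    return rparse(binary_r_data, atomicArray=False)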
########################################################################################
class Closure(object):
'Very simple container to return "something" for a closure. Not really usable in Python though.'
def __init__(self, lexeme, aList1, aList2):
self.lexeme = lexeme
self.aList1 = aList1
self.aList2 = aList2
def __repr__(self):
return '<Closure instance %d>' % id(self)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A decoder that performs beam search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
__all__ = [
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"BeamSearchDecoder",
"FinalBeamSearchDecoderOutput",
"tile_batch",
]
class BeamSearchDecoderState(
collections.namedtuple("BeamSearchDecoderState",
("cell_state", "log_probs", "finished", "lengths"))):
pass
class BeamSearchDecoderOutput(
collections.namedtuple("BeamSearchDecoderOutput",
("scores", "predicted_ids", "parent_ids"))):
pass
class FinalBeamSearchDecoderOutput(
collections.namedtuple("FinalBeamDecoderOutput",
["predicted_ids", "beam_search_decoder_output"])):
"""Final outputs returned by the beam search after all decoding is finished.
Args:
predicted_ids: The final prediction. A tensor of shape
`[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if
`output_time_major` is True). Beams are ordered from best to worst.
beam_search_decoder_output: An instance of `BeamSearchDecoderOutput` that
describes the state of the beam search.
"""
pass
def _tile_batch(t, multiplier):
"""Core single-tensor implementation of tile_batch."""
t = ops.convert_to_tensor(t, name="t")
shape_t = array_ops.shape(t)
if t.shape.ndims is None or t.shape.ndims < 1:
raise ValueError("t must have statically known rank")
tiling = [1] * (t.shape.ndims + 1)
tiling[1] = multiplier
tiled_static_batch_size = (
t.shape[0].value * multiplier if t.shape[0].value is not None else None)
tiled = array_ops.tile(array_ops.expand_dims(t, 1), tiling)
tiled = array_ops.reshape(tiled,
array_ops.concat(
([shape_t[0] * multiplier], shape_t[1:]), 0))
tiled.set_shape(
tensor_shape.TensorShape([tiled_static_batch_size]).concatenate(
t.shape[1:]))
return tiled
def tile_batch(t, multiplier, name=None):
"""Tile the batch dimension of a (possibly nested structure of) tensor(s) t.
For each tensor t in a (possibly nested structure) of tensors,
this function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed of
minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a shape
`[batch_size * multiplier, s0, s1, ...]` composed of minibatch entries
`t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated
`multiplier` times.
Args:
t: `Tensor` shaped `[batch_size, ...]`.
multiplier: Python int.
name: Name scope for any created operations.
Returns:
A (possibly nested structure of) `Tensor` shaped
`[batch_size * multiplier, ...]`.
Raises:
ValueError: if tensor(s) `t` do not have a statically known rank or
the rank is < 1.
"""
flat_t = nest.flatten(t)
with ops.name_scope(name, "tile_batch", flat_t + [multiplier]):
return nest.map_structure(lambda t_: _tile_batch(t_, multiplier), t)
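# Worked example (added for clarity): with batch_size=2 and multiplier=3, a
# tensor [[1, 2], [3, 4]] of shape [2, 2] is tiled to shape [6, 2]:
# [[1, 2], [1, 2], [1, 2], [3, 4], [3, 4], [3, 4]], i.e. each minibatch entry
# is repeated `multiplier` times before the next entry.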
def gather_tree_from_array(t, parent_ids, sequence_length):
"""Calculates the full beams for `TensorArray`s.
Args:
t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
where `s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `Tensor` which is a stacked `TensorArray` of the same size and type as
`t` and where beams are sorted in each `Tensor` according to `parent_ids`.
"""
max_time = parent_ids.shape[0].value or array_ops.shape(parent_ids)[0]
batch_size = parent_ids.shape[1].value or array_ops.shape(parent_ids)[1]
beam_width = parent_ids.shape[2].value or array_ops.shape(parent_ids)[2]
# Generate beam ids that will be reordered by gather_tree.
beam_ids = array_ops.expand_dims(
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
mask = array_ops.sequence_mask(
sequence_length, maxlen=max_time, dtype=dtypes.int32)
mask = array_ops.transpose(mask, perm=[2, 0, 1])
# Use beam_width + 1 to mark the end of beam.
masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
sorted_beam_ids = beam_search_ops.gather_tree(
step_ids=masked_beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
sorted_beam_ids = array_ops.where(
math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
batch_ind = array_ops.tile(array_ops.reshape(
math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)
# Gather from a tensor with collapsed additional dimensions.
gather_from = t
final_shape = array_ops.shape(gather_from)
gather_from = array_ops.reshape(
gather_from, [max_time, batch_size, beam_width, -1])
ordered = array_ops.gather_nd(gather_from, indices)
ordered = array_ops.reshape(ordered, final_shape)
return ordered
def _check_maybe(t):
if t.shape.ndims is None:
raise ValueError(
"Expected tensor (%s) to have known rank, but ndims == None." % t)
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
"""Raises an exception if dimensions are known statically and can not be
reshaped to [batch_size, beam_size, -1].
"""
reshaped_shape = tensor_shape.TensorShape([batch_size, beam_width, None])
if (batch_size is not None and shape[0].value is not None
and (shape[0] != batch_size * beam_width
or (shape.ndims >= 2 and shape[1].value is not None
and (shape[0] != batch_size or shape[1] != beam_width)))):
tf_logging.warn("TensorArray reordering expects elements to be "
"reshapable to %s which is incompatible with the "
"current shape %s. Consider setting "
"reorder_tensor_arrays to False to disable TensorArray "
"reordering during the beam search."
% (reshaped_shape, shape))
return False
return True
def _check_batch_beam(t, batch_size, beam_width):
"""Returns an Assert operation checking that the elements of the stacked
TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
the TensorArray elements have a known rank of at least 1.
"""
error_message = ("TensorArray reordering expects elements to be "
"reshapable to [batch_size, beam_size, -1] which is "
"incompatible with the dynamic shape of %s elements. "
"Consider setting reorder_tensor_arrays to False to disable "
"TensorArray reordering during the beam search."
% (t.name))
rank = t.shape.ndims
shape = array_ops.shape(t)
if rank == 2:
condition = math_ops.equal(shape[1], batch_size * beam_width)
else:
condition = math_ops.logical_or(
math_ops.equal(shape[1], batch_size * beam_width),
math_ops.logical_and(
math_ops.equal(shape[1], batch_size),
math_ops.equal(shape[2], beam_width)))
return control_flow_ops.Assert(condition, [error_message])
class BeamSearchDecoder(decoder.Decoder):
"""BeamSearch sampling decoder.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
"""
def __init__(self,
cell,
embedding,
start_tokens,
end_token,
initial_state,
beam_width,
output_layer=None,
length_penalty_weight=0.0,
reorder_tensor_arrays=True):
"""Initialize the BeamSearchDecoder.
Args:
cell: An `RNNCell` instance.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
beam_width: Python integer, the number of beams.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
reorder_tensor_arrays: If `True`, `TensorArray`s' elements within the cell
state will be reordered according to the beam search path. If the
`TensorArray` can be reordered, the stacked form will be returned.
Otherwise, the `TensorArray` will be returned as is. Set this flag to
`False` if the cell state contains `TensorArray`s that are not amenable
to reordering.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`,
or `output_layer` is not an instance of `tf.layers.Layer`.
ValueError: If `start_tokens` is not a vector or
`end_token` is not a scalar.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell) # pylint: disable=protected-access
if (output_layer is not None and
not isinstance(output_layer, layers_base.Layer)):
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._output_layer = output_layer
self._reorder_tensor_arrays = reorder_tensor_arrays
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._batch_size = array_ops.size(start_tokens)
self._beam_width = beam_width
self._length_penalty_weight = length_penalty_weight
self._initial_cell_state = nest.map_structure(
self._maybe_split_batch_beams, initial_state, self._cell.state_size)
self._start_tokens = array_ops.tile(
array_ops.expand_dims(self._start_tokens, 1), [1, self._beam_width])
self._start_inputs = self._embedding_fn(self._start_tokens)
self._finished = array_ops.one_hot(
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=False,
off_value=True,
dtype=dtypes.bool)
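    # Note on the one_hot above: index 0 receives on_value=False and every other
    # beam receives off_value=True, so beam 0 starts "unfinished" while beams
    # 1..beam_width-1 start "finished". Combined with the -inf initial log
    # probabilities created in initialize(), this ensures only the first beam
    # contributes candidates at the first decoding step.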
@property
def batch_size(self):
return self._batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
layer_output_shape = self._output_layer.compute_output_shape(
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def tracks_own_finished(self):
"""The BeamSearchDecoder shuffles its beams and their finished state.
For this reason, it conflicts with the `dynamic_decode` function's
tracking of finished states. Setting this property to true avoids
early stopping of decoding due to mismanagement of the finished state
in `dynamic_decode`.
Returns:
`True`.
"""
return True
@property
def output_size(self):
# Return the cell output and the id
return BeamSearchDecoderOutput(
scores=tensor_shape.TensorShape([self._beam_width]),
predicted_ids=tensor_shape.TensorShape([self._beam_width]),
parent_ids=tensor_shape.TensorShape([self._beam_width]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_cell_state)[0].dtype
return BeamSearchDecoderOutput(
scores=nest.map_structure(lambda _: dtype, self._rnn_output_size()),
predicted_ids=dtypes.int32,
parent_ids=dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, start_inputs, initial_state)`.
"""
finished, start_inputs = self._finished, self._start_inputs
dtype = nest.flatten(self._initial_cell_state)[0].dtype
log_probs = array_ops.one_hot( # shape(batch_sz, beam_sz)
array_ops.zeros([self._batch_size], dtype=dtypes.int32),
depth=self._beam_width,
on_value=ops.convert_to_tensor(0.0, dtype=dtype),
off_value=ops.convert_to_tensor(-np.Inf, dtype=dtype),
dtype=dtype)
initial_state = BeamSearchDecoderState(
cell_state=self._initial_cell_state,
log_probs=log_probs,
finished=finished,
lengths=array_ops.zeros(
[self._batch_size, self._beam_width], dtype=dtypes.int64))
return (finished, start_inputs, initial_state)
def finalize(self, outputs, final_state, sequence_lengths):
"""Finalize and return the predicted_ids.
Args:
outputs: An instance of BeamSearchDecoderOutput.
final_state: An instance of BeamSearchDecoderState. Passed through to the
output.
sequence_lengths: An `int64` tensor shaped `[batch_size, beam_width]`.
The sequence lengths determined for each beam during decode.
**NOTE** These are ignored; the updated sequence lengths are stored in
`final_state.lengths`.
Returns:
outputs: An instance of `FinalBeamSearchDecoderOutput` where the
predicted_ids are the result of calling _gather_tree.
final_state: The same input instance of `BeamSearchDecoderState`.
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(final_state.lengths, axis=1))
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=self._end_token)
if self._reorder_tensor_arrays:
final_state = final_state._replace(cell_state=nest.map_structure(
lambda t: self._maybe_sort_array_beams(
t, outputs.parent_ids, final_state.lengths),
final_state.cell_state))
outputs = FinalBeamSearchDecoderOutput(
beam_search_decoder_output=outputs, predicted_ids=predicted_ids)
return outputs, final_state
def _merge_batch_beams(self, t, s=None):
"""Merges the tensor from a batch of beams into a batch by beams.
More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
reshape this into [batch_size*beam_width, s]
Args:
t: Tensor of dimension [batch_size, beam_width, s]
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size * beam_width, s].
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.as_shape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
static_batch_size = tensor_util.constant_value(self._batch_size)
batch_size_beam_width = (
None
if static_batch_size is None else static_batch_size * self._beam_width)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size * self._beam_width], t_shape[2:]),
0))
reshaped_t.set_shape(
(tensor_shape.TensorShape([batch_size_beam_width]).concatenate(s)))
return reshaped_t
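  # Shape bookkeeping example (illustrative values): with batch_size=4,
  # beam_width=3 and a cell-state entry of shape [4, 3, 128], _merge_batch_beams
  # produces a [12, 128] tensor that the wrapped RNN cell can consume, and
  # _split_batch_beams below performs the inverse reshape back to [4, 3, 128]
  # once the cell has run.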
def _split_batch_beams(self, t, s=None):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
reshape this into [batch_size, beam_width, s]
Args:
t: Tensor of dimension [batch_size*beam_width, s].
s: (Possibly known) depth shape.
Returns:
A reshaped version of t with dimension [batch_size, beam_width, s].
Raises:
ValueError: If, after reshaping, the new tensor is not shaped
`[batch_size, beam_width, s]` (assuming batch_size and beam_width
are known statically).
"""
if isinstance(s, ops.Tensor):
s = tensor_shape.TensorShape(tensor_util.constant_value(s))
else:
s = tensor_shape.TensorShape(s)
t_shape = array_ops.shape(t)
reshaped_t = array_ops.reshape(
t,
array_ops.concat(([self._batch_size, self._beam_width], t_shape[1:]),
0))
static_batch_size = tensor_util.constant_value(self._batch_size)
expected_reshaped_shape = tensor_shape.TensorShape(
[static_batch_size, self._beam_width]).concatenate(s)
if not reshaped_t.shape.is_compatible_with(expected_reshaped_shape):
raise ValueError("Unexpected behavior when reshaping between beam width "
"and batch size. The reshaped tensor has shape: %s. "
"We expected it to have shape "
"(batch_size, beam_width, depth) == %s. Perhaps you "
"forgot to create a zero_state with "
"batch_size=encoder_batch_size * beam_width?" %
(reshaped_t.shape, expected_reshaped_shape))
reshaped_t.set_shape(expected_reshaped_shape)
return reshaped_t
def _maybe_split_batch_beams(self, t, s):
"""Maybe splits the tensor from a batch by beams into a batch of beams.
We do this so that we can use nest and not run into problems with shapes.
Args:
t: `Tensor`, either scalar or shaped `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
If `t` is a matrix or higher order tensor, then the return value is
`t` reshaped to `[batch_size, beam_width] + s`. Otherwise `t` is
returned unchanged.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 1:
return self._split_batch_beams(t, s)
else:
return t
def _maybe_merge_batch_beams(self, t, s):
"""Splits the tensor from a batch by beams into a batch of beams.
More exactly, `t` is a tensor of dimension `[batch_size * beam_width] + s`,
then we reshape it to `[batch_size, beam_width] + s`.
Args:
t: `Tensor` of dimension `[batch_size * beam_width] + s`.
s: `Tensor`, Python int, or `TensorShape`.
Returns:
A reshaped version of t with shape `[batch_size, beam_width] + s`.
Raises:
ValueError: If the rank of `t` is not statically known.
"""
if isinstance(t, tensor_array_ops.TensorArray):
return t
_check_maybe(t)
if t.shape.ndims >= 2:
return self._merge_batch_beams(t, s)
else:
return t
def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
"""Maybe sorts beams within a `TensorArray`.
Args:
t: A `TensorArray` of size `max_time` that contains `Tensor`s of shape
`[batch_size, beam_width, s]` or `[batch_size * beam_width, s]` where
`s` is the depth shape.
parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
sequence_length: The sequence length of shape `[batch_size, beam_width]`.
Returns:
A `TensorArray` where beams are sorted in each `Tensor` or `t` itself if
it is not a `TensorArray` or does not meet shape requirements.
"""
if not isinstance(t, tensor_array_ops.TensorArray):
return t
# pylint: disable=protected-access
if (not t._infer_shape or not t._element_shape
or t._element_shape[0].ndims is None
or t._element_shape[0].ndims < 1):
shape = (
t._element_shape[0] if t._infer_shape and t._element_shape
else tensor_shape.TensorShape(None))
tf_logging.warn("The TensorArray %s in the cell state is not amenable to "
"sorting based on the beam search result. For a "
"TensorArray to be sorted, its elements shape must be "
"defined and have at least a rank of 1, but saw shape: %s"
% (t.handle.name, shape))
return t
shape = t._element_shape[0]
# pylint: enable=protected-access
if not _check_static_batch_beam_maybe(
shape, tensor_util.constant_value(self._batch_size), self._beam_width):
return t
t = t.stack()
with ops.control_dependencies(
[_check_batch_beam(t, self._batch_size, self._beam_width)]):
return gather_tree_from_array(t, parent_ids, sequence_length)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
batch_size = self._batch_size
beam_width = self._beam_width
end_token = self._end_token
length_penalty_weight = self._length_penalty_weight
with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
cell_state = state.cell_state
inputs = nest.map_structure(
lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]), inputs)
cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state,
self._cell.state_size)
cell_outputs, next_cell_state = self._cell(inputs, cell_state)
cell_outputs = nest.map_structure(
lambda out: self._split_batch_beams(out, out.shape[1:]), cell_outputs)
next_cell_state = nest.map_structure(
self._maybe_split_batch_beams, next_cell_state, self._cell.state_size)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
beam_search_output, beam_search_state = _beam_search_step(
time=time,
logits=cell_outputs,
next_cell_state=next_cell_state,
beam_state=state,
batch_size=batch_size,
beam_width=beam_width,
end_token=end_token,
length_penalty_weight=length_penalty_weight)
finished = beam_search_state.finished
sample_ids = beam_search_output.predicted_ids
next_inputs = control_flow_ops.cond(
math_ops.reduce_all(finished), lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (beam_search_output, beam_search_state, next_inputs, finished)
def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
beam_width, end_token, length_penalty_weight):
"""Performs a single step of Beam Search Decoding.
Args:
time: Beam search time step, should start at 0. At time 0 we assume
that all beams are equal and consider only the first beam for
continuations.
logits: Logits at the current time step. A tensor of shape
`[batch_size, beam_width, vocab_size]`
next_cell_state: The next state from the cell, e.g. an instance of
AttentionWrapperState if the cell is attentional.
beam_state: Current state of the beam search.
An instance of `BeamSearchDecoderState`.
batch_size: The batch size for this input.
beam_width: Python int. The size of the beams.
end_token: The int32 end token.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
    A tuple `(output, next_state)`, where `output` is a `BeamSearchDecoderOutput`
    and `next_state` is the updated `BeamSearchDecoderState` for this step.
"""
static_batch_size = tensor_util.constant_value(batch_size)
# Calculate the current lengths of the predictions
prediction_lengths = beam_state.lengths
previously_finished = beam_state.finished
# Calculate the total log probs for the new hypotheses
# Final Shape: [batch_size, beam_width, vocab_size]
step_log_probs = nn_ops.log_softmax(logits)
step_log_probs = _mask_probs(step_log_probs, end_token, previously_finished)
total_probs = array_ops.expand_dims(beam_state.log_probs, 2) + step_log_probs
# Calculate the continuation lengths by adding to all continuing beams.
vocab_size = logits.shape[-1].value or array_ops.shape(logits)[-1]
lengths_to_add = array_ops.one_hot(
indices=array_ops.fill([batch_size, beam_width], end_token),
depth=vocab_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=dtypes.int64)
add_mask = math_ops.to_int64(math_ops.logical_not(previously_finished))
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
# Calculate the scores for each beam
scores = _get_scores(
log_probs=total_probs,
sequence_lengths=new_prediction_lengths,
length_penalty_weight=length_penalty_weight)
time = ops.convert_to_tensor(time, name="time")
# During the first time step we only consider the initial beam
scores_flat = array_ops.reshape(scores, [batch_size, -1])
# Pick the next beams according to the specified successors function
next_beam_size = ops.convert_to_tensor(
beam_width, dtype=dtypes.int32, name="beam_width")
next_beam_scores, word_indices = nn_ops.top_k(scores_flat, k=next_beam_size)
next_beam_scores.set_shape([static_batch_size, beam_width])
word_indices.set_shape([static_batch_size, beam_width])
# Pick out the probs, beam_ids, and states according to the chosen predictions
next_beam_probs = _tensor_gather_helper(
gather_indices=word_indices,
gather_from=total_probs,
batch_size=batch_size,
range_size=beam_width * vocab_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.to_int32(word_indices % vocab_size,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = math_ops.to_int32(raw_next_word_ids)
next_beam_ids = math_ops.to_int32(
word_indices / vocab_size, name="next_beam_parent_ids")
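  # Illustration: word_indices index into the flattened
  # [beam_width * vocab_size] axis, so with vocab_size=5 an index of 13
  # decomposes into parent beam 13 // 5 = 2 and vocabulary id 13 % 5 = 3,
  # which is exactly what the mod and division above compute.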
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=previously_finished,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_finished = math_ops.logical_or(
previously_finished,
math_ops.equal(next_word_ids, end_token),
name="next_beam_finished")
# Calculate the length of the next predictions.
# 1. Finished beams remain unchanged.
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = math_ops.to_int64(math_ops.logical_not(previously_finished))
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[-1])
next_prediction_len += lengths_to_add
# Pick out the cell_states according to the next_beam_ids. We use a
# different gather_shape here because the cell_state tensors, i.e.
# the tensors that would be gathered from, all have dimension
# greater than two and we need to preserve those dimensions.
# pylint: disable=g-long-lambda
next_cell_state = nest.map_structure(
lambda gather_from: _maybe_tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=gather_from,
batch_size=batch_size,
range_size=beam_width,
gather_shape=[batch_size * beam_width, -1]),
next_cell_state)
# pylint: enable=g-long-lambda
next_state = BeamSearchDecoderState(
cell_state=next_cell_state,
log_probs=next_beam_probs,
lengths=next_prediction_len,
finished=next_finished)
output = BeamSearchDecoderOutput(
scores=next_beam_scores,
predicted_ids=next_word_ids,
parent_ids=next_beam_ids)
return output, next_state
def _get_scores(log_probs, sequence_lengths, length_penalty_weight):
"""Calculates scores for beam search hypotheses.
Args:
log_probs: The log probabilities with shape
`[batch_size, beam_width, vocab_size]`.
sequence_lengths: The array of sequence lengths.
length_penalty_weight: Float weight to penalize length. Disabled with 0.0.
Returns:
The scores normalized by the length_penalty.
"""
length_penalty_ = _length_penalty(
sequence_lengths=sequence_lengths, penalty_factor=length_penalty_weight)
return log_probs / length_penalty_
def _length_penalty(sequence_lengths, penalty_factor):
"""Calculates the length penalty. See https://arxiv.org/abs/1609.08144.
Returns the length penalty tensor:
```
[(5+sequence_lengths)/6]**penalty_factor
```
where all operations are performed element-wise.
Args:
    sequence_lengths: `Tensor`, the sequence lengths of each hypothesis.
penalty_factor: A scalar that weights the length penalty.
Returns:
If the penalty is `0`, returns the scalar `1.0`. Otherwise returns
the length penalty factor, a tensor with the same shape as
`sequence_lengths`.
"""
penalty_factor = ops.convert_to_tensor(penalty_factor, name="penalty_factor")
penalty_factor.set_shape(()) # penalty should be a scalar.
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
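# Worked example of the penalty above (illustrative values): with
# penalty_factor=1.0 and sequence_lengths=7 the penalty is
# ((5 + 7) / 6) ** 1.0 = 2.0, so a hypothesis of length 7 has its summed log
# probability divided by 2.0 in _get_scores; with penalty_factor=0.0 the
# function short-circuits to the scalar 1.0 and scores are the raw log
# probabilities.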
def _mask_probs(probs, eos_token, finished):
"""Masks log probabilities.
The result is that finished beams allocate all probability mass to eos and
unfinished beams remain unchanged.
Args:
probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
eos_token: An int32 id corresponding to the EOS token to allocate
probability to.
finished: A boolean tensor of shape `[batch_size, beam_width]` that
specifies which elements in the beam are finished already.
Returns:
A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
beams stay unchanged and finished beams are replaced with a tensor with all
probability on the EOS token.
"""
vocab_size = array_ops.shape(probs)[2]
# All finished examples are replaced with a vector that has all
# probability on EOS
finished_row = array_ops.one_hot(
eos_token,
vocab_size,
dtype=probs.dtype,
on_value=ops.convert_to_tensor(0., dtype=probs.dtype),
off_value=probs.dtype.min)
finished_probs = array_ops.tile(
array_ops.reshape(finished_row, [1, 1, -1]),
array_ops.concat([array_ops.shape(finished), [1]], 0))
finished_mask = array_ops.tile(
array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
return array_ops.where(finished_mask, finished_probs, probs)
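# Example of the masking above (illustrative values): with vocab_size=4,
# eos_token=3 and float32 probabilities, a finished beam's row becomes
# [min, min, min, 0.0] (where min is the most negative float32 value), so any
# continuation other than repeating EOS is effectively impossible, while
# unfinished beams keep their original log probabilities.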
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
range_size, gather_shape):
"""Maybe applies _tensor_gather_helper.
  This applies _tensor_gather_helper when the rank of gather_from is at least
  as large as the length of gather_shape. This is used in conjunction with nest so
that we don't apply _tensor_gather_helper to inapplicable values like scalars.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
or the original tensor if its dimensions are too small.
"""
if isinstance(gather_from, tensor_array_ops.TensorArray):
return gather_from
_check_maybe(gather_from)
if gather_from.shape.ndims >= len(gather_shape):
return _tensor_gather_helper(
gather_indices=gather_indices,
gather_from=gather_from,
batch_size=batch_size,
range_size=range_size,
gather_shape=gather_shape)
else:
return gather_from
def _tensor_gather_helper(gather_indices,
gather_from,
batch_size,
range_size,
gather_shape,
name=None):
"""Helper for gathering the right indices from the tensor.
This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
gathering from that according to the gather_indices, which are offset by
the right amounts in order to preserve the batch order.
Args:
gather_indices: The tensor indices that we use to gather.
gather_from: The tensor that we are gathering from.
batch_size: The input batch size.
range_size: The number of values in each range. Likely equal to beam_width.
gather_shape: What we should reshape gather_from to in order to preserve the
correct values. An example is when gather_from is the attention from an
AttentionWrapperState with shape [batch_size, beam_width, attention_size].
There, we want to preserve the attention_size elements, so gather_shape is
[batch_size * beam_width, -1]. Then, upon reshape, we still have the
attention_size as desired.
    name: The tensor name for the set of operations. By default this is
      'tensor_gather_helper'. The final output is named 'output'.
Returns:
output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
"""
with ops.name_scope(name, "tensor_gather_helper"):
range_ = array_ops.expand_dims(math_ops.range(batch_size) * range_size, 1)
gather_indices = array_ops.reshape(gather_indices + range_, [-1])
output = array_ops.gather(
array_ops.reshape(gather_from, gather_shape), gather_indices)
final_shape = array_ops.shape(gather_from)[:1 + len(gather_shape)]
static_batch_size = tensor_util.constant_value(batch_size)
final_static_shape = (
tensor_shape.TensorShape([static_batch_size]).concatenate(
gather_from.shape[1:1 + len(gather_shape)]))
output = array_ops.reshape(output, final_shape, name="output")
output.set_shape(final_static_shape)
return output
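# Worked example of _tensor_gather_helper (illustrative values): with
# batch_size=2, range_size=beam_width=3, gather_shape=[-1] and
# gather_indices=[[2, 0, 1], [1, 1, 0]], the per-batch offsets are [[0], [3]]
# and the flattened indices become [2, 0, 1, 4, 4, 3]; gathering from a
# [2, 3]-shaped tensor flattened to length 6 therefore reorders the beams of
# each batch entry independently while preserving batch order.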
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class XmlElementHandler(object):
def __init__(self, execution_result, root_handler=None):
self._stack = [(root_handler or RootHandler(), execution_result)]
def start(self, elem):
handler, result = self._stack[-1]
handler = handler.get_child_handler(elem)
result = handler.start(elem, result)
self._stack.append((handler, result))
def end(self, elem):
handler, result = self._stack.pop()
handler.end(elem, result)
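# Illustrative sketch (not part of this module): the handler above is designed
# to be driven by streaming XML parse events, roughly like
#
#   from xml.etree import ElementTree as ET
#   handler = XmlElementHandler(execution_result)
#   for event, elem in ET.iterparse('output.xml', events=('start', 'end')):
#       if event == 'start':
#           handler.start(elem)
#       else:
#           handler.end(elem)
#
# where 'output.xml' and the execution_result instance are placeholders.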
class _Handler(object):
def __init__(self):
self._child_handlers = dict((c.tag, c) for c in self._children())
def _children(self):
return []
def get_child_handler(self, elem):
try:
return self._child_handlers[elem.tag]
except KeyError:
raise DataError("Incompatible XML element '%s'." % elem.tag)
def start(self, elem, result):
return result
def end(self, elem, result):
pass
def _timestamp(self, elem, attr_name):
timestamp = elem.get(attr_name)
return timestamp if timestamp != 'N/A' else None
class RootHandler(_Handler):
def _children(self):
return [RobotHandler()]
class RobotHandler(_Handler):
tag = 'robot'
def start(self, elem, result):
generator = elem.get('generator', 'unknown').split()[0].upper()
result.generated_by_robot = generator == 'ROBOT'
return result
def _children(self):
return [RootSuiteHandler(), StatisticsHandler(), ErrorsHandler()]
class SuiteHandler(_Handler):
tag = 'suite'
def start(self, elem, result):
return result.suites.create(name=elem.get('name', ''),
source=elem.get('source'))
def _children(self):
return [DocHandler(), MetadataHandler(), SuiteStatusHandler(),
KeywordHandler(), TestCaseHandler(), self]
class RootSuiteHandler(SuiteHandler):
def start(self, elem, result):
result.suite.name = elem.get('name', '')
result.suite.source = elem.get('source')
return result.suite
def _children(self):
return SuiteHandler._children(self)[:-1] + [SuiteHandler()]
class TestCaseHandler(_Handler):
tag = 'test'
def start(self, elem, result):
return result.tests.create(name=elem.get('name', ''))
def _children(self):
return [DocHandler(), TagsHandler(), TimeoutHandler(),
TestStatusHandler(), KeywordHandler()]
class KeywordHandler(_Handler):
tag = 'kw'
def start(self, elem, result):
return result.keywords.create(kwname=elem.get('name', ''),
libname=elem.get('library', ''),
type=elem.get('type', 'kw'))
def _children(self):
return [DocHandler(), ArgumentsHandler(), AssignHandler(),
TagsHandler(), TimeoutHandler(), KeywordStatusHandler(),
MessageHandler(), self]
class MessageHandler(_Handler):
tag = 'msg'
def end(self, elem, result):
result.messages.create(elem.text or '',
elem.get('level', 'INFO'),
elem.get('html', 'no') == 'yes',
self._timestamp(elem, 'timestamp'))
class _StatusHandler(_Handler):
tag = 'status'
def _set_status(self, elem, result):
result.status = elem.get('status', 'FAIL')
def _set_message(self, elem, result):
result.message = elem.text or ''
def _set_times(self, elem, result):
result.starttime = self._timestamp(elem, 'starttime')
result.endtime = self._timestamp(elem, 'endtime')
class KeywordStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_status(elem, result)
self._set_times(elem, result)
if result.type == result.TEARDOWN_TYPE:
self._set_message(elem, result)
class SuiteStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_message(elem, result)
self._set_times(elem, result)
class TestStatusHandler(_StatusHandler):
def end(self, elem, result):
self._set_status(elem, result)
self._set_message(elem, result)
self._set_times(elem, result)
class DocHandler(_Handler):
tag = 'doc'
def end(self, elem, result):
result.doc = elem.text or ''
class MetadataHandler(_Handler):
tag = 'metadata'
def _children(self):
return [MetadataItemHandler()]
class MetadataItemHandler(_Handler):
tag = 'item'
def end(self, elem, result):
result.metadata[elem.get('name', '')] = elem.text or ''
class TagsHandler(_Handler):
tag = 'tags'
def _children(self):
return [TagHandler()]
class TagHandler(_Handler):
tag = 'tag'
def end(self, elem, result):
result.tags.add(elem.text or '')
class TimeoutHandler(_Handler):
tag = 'timeout'
def end(self, elem, result):
result.timeout = elem.get('value')
class AssignHandler(_Handler):
tag = 'assign'
def _children(self):
return [AssignVarHandler()]
class AssignVarHandler(_Handler):
tag = 'var'
def end(self, elem, result):
result.assign += (elem.text or '',)
class ArgumentsHandler(_Handler):
tag = 'arguments'
def _children(self):
return [ArgumentHandler()]
class ArgumentHandler(_Handler):
tag = 'arg'
def end(self, elem, result):
result.args += (elem.text or '',)
class ErrorsHandler(_Handler):
tag = 'errors'
def start(self, elem, result):
return result.errors
def _children(self):
return [MessageHandler()]
class StatisticsHandler(_Handler):
tag = 'statistics'
def get_child_handler(self, elem):
return self
|
|
import requests
from xml.etree import cElementTree as ElementTree # for zillow API
from .pyzillowerrors import ZillowError, ZillowFail, ZillowNoResults
from . import __version__
class ZillowWrapper(object):
"""This class provides an interface into the Zillow API.
An API key is required to create an instance of this class:
>>> from pyzillow.pyzillow import ZillowWrapper
>>> zillow_data = ZillowWrapper(YOUR_ZILLOW_API_KEY)
To request data from Zillow, you can choose between:
1. The GetDeepSearchResults API endpoint (:class:`pyzillow.pyzillow.GetDeepSearchResults`)
which requires the following arguments:
* A street address (e.g. ``'2114 Bigelow Ave'``)
* A ZIP code or city and state combination (e.g. ``'98109'`` or ``'Seattle, WA'``)
* Optional: Enabling or disabling Zillow Rentzestimate information in
API results (``True``/``False``)
Example:
>>> from pyzillow.pyzillow import ZillowWrapper, GetDeepSearchResults
>>> zillow_data = ZillowWrapper(YOUR_ZILLOW_API_KEY)
>>> deep_search_response = zillow_data.get_deep_search_results(address,
zipcode,
rentzestimate)
>>> result = GetDeepSearchResults(deep_search_response)
2. The GetUpdatedPropertyDetails API endpoint
(:class:`pyzillow.pyzillow.GetUpdatedPropertyDetails`) which requires a
Zillow Property ID (ZPID) as an argument. You can acquire this identifier by
accessing ``.zillow_id`` from a :class:`pyzillow.pyzillow.GetDeepSearchResults`
object. GetUpdatedPropertyDetails data is not available for all valid Zillow IDs.
Example:
>>> from pyzillow.pyzillow import ZillowWrapper, GetUpdatedPropertyDetails
>>> zillow_data = ZillowWrapper(YOUR_ZILLOW_API_KEY)
>>> updated_property_details_response = \
zillow_data.get_updated_property_details(zillow_id)
>>> result = GetUpdatedPropertyDetails(updated_property_details_response)
"""
def __init__(self, api_key: str = None):
"""Constructor method
"""
self.api_key = api_key
def get_deep_search_results(
self, address: str, zipcode: str, rentzestimate: bool = False
):
"""This method provides results from the GetDeepSearchResults API endpoint as an XML object.
:param address: Street address to look up
:type address: str
:param zipcode: ZIP code to look up
:type zipcode: str
:param rentzestimate: Add Rent Zestimate information to result (True/False),
defaults to False
:type rentzestimate: bool, optional
:return: Result from API query
:rtype: xml.etree.ElementTree.Element
"""
url = "http://www.zillow.com/webservice/GetDeepSearchResults.htm"
params = {
"address": address,
"citystatezip": zipcode,
"rentzestimate": str(rentzestimate).lower(),
"zws-id": self.api_key,
}
return self.get_data(url, params)
def get_updated_property_details(self, zpid: str):
"""This method provides results from the GetUpdatedPropertyDetails API endpoint as an XML object.
:param zpid: Zillow Web Service Identifier
:type zpid: str
:return: Result from API query
:rtype: xml.etree.ElementTree.Element
"""
url = "http://www.zillow.com/webservice/GetUpdatedPropertyDetails.htm"
params = {"zpid": zpid, "zws-id": self.api_key}
return self.get_data(url, params)
def get_data(self, url: str, params: dict):
"""This method requests data from the API endpoint specified in the url argument.
It uses parameters from the params argument.
:param url: URL of API endpoint
:type url: str
:param params: Parameters for API query
:type params: dict
:raises ZillowFail: The API endpoint could not be reached or the request
did not return valid XML
:raises ZillowError: The API endpoint responded with an error code
:raises ZillowNoResults: The request did not return any results
:return: Result from API query
:rtype: xml.etree.ElementTree.Element
"""
try:
request = requests.get(
url=url,
params=params,
headers={
"User-Agent": "".join(["pyzillow/", __version__, " (Python)"])
},
)
except (
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.exceptions.Timeout,
):
raise ZillowFail
try:
request.raise_for_status()
except requests.exceptions.HTTPError:
raise ZillowFail
try:
response = ElementTree.fromstring(request.text)
except ElementTree.ParseError:
print("Zillow response is not a valid XML ({})".format(params["address"]))
raise ZillowFail
if response.findall("message/code")[0].text != "0":
raise ZillowError(int(str(response.findall("message/code")[0].text)))
else:
if not response.findall("response"):
print("Zillow returned no results for ({})".format(params["address"]))
raise ZillowNoResults
return response
class ZillowResults(object):
"""Base class for :class:`pyzillow.pyzillow.GetDeepSearchResults`
and :class:`pyzillow.pyzillow.GetUpdatedPropertyDetails`.
"""
def __init__(self):
self.attribute_mapping = {}
def get_attr(self, attr):
"""
"""
try:
return self.data.find(self.attribute_mapping[attr]).text
except AttributeError:
return None
def __str__(self):
return self.zillow_id
@property
def area_unit(self):
"""
lotSizeSqFt
"""
return u"SqFt"
@property
def last_sold_price_currency(self):
"""
lastSoldPrice currency
"""
return self.data.find(self.attribute_mapping["last_sold_price"]).attrib[
"currency"
]
class GetDeepSearchResults(ZillowResults):
"""Maps results from the XML data array into attributes of an instance of GetDeepSearchResults.
An instance of ``GetDeepSearchResults`` has the following attributes:
``.bathrooms``
``.bedrooms``
``.city``
``.fips_county``
``.graph_data_link``
``.home_detail_link``
``.home_size``
``.home_type``
``.last_sold_date``
``.last_sold_price``
``.latitude``
``.longitude``
``.map_this_home_link``
``.property_size``
``.rentzestimate_amount``
``.rentzestimate_last_updated``
``.rentzestimate_valuation_range_high``
``.rentzestimate_valuation_range_low``
``.rentzestimate_value_change``
``.state``
``.street``
``.tax_value``
``.tax_year``
``.total_rooms``
``.use_code``
``.year_built``
``.zestimate_amount``
``.zestimate_last_updated``
``.zestimate_percentile``
``.zestimate_valuation_range_high``
``.zestimate_valuation_range_low``
``.zestimate_value_change``
``.zillow_id``
``.zipcode``
"""
attribute_mapping = {
"bathrooms": "result/bathrooms",
"bedrooms": "result/bedrooms",
"city": "result/address/city",
"fips_county": "result/FIPScounty",
"graph_data_link": "result/links/graphsanddata",
"home_detail_link": "result/links/homedetails",
"home_size": "result/finishedSqFt",
"home_type": "result/useCode",
"last_sold_date": "result/lastSoldDate",
"last_sold_price": "result/lastSoldPrice",
"latitude": "result/address/latitude",
"longitude": "result/address/longitude",
"map_this_home_link": "result/links/mapthishome",
"property_size": "result/lotSizeSqFt",
"rentzestimate_amount": "result/rentzestimate/amount",
"rentzestimate_last_updated": "result/rentzestimate/last-updated",
"rentzestimate_valuation_range_high": "result/rentzestimate/valuationRange/high",
"rentzestimate_valuation_range_low": "result/rentzestimate/valuationRange/low",
"rentzestimate_value_change": "result/rentzestimate/valueChange",
"state": "result/address/state",
"street": "result/address/street",
"tax_value": "result/taxAssessment",
"tax_year": "result/taxAssessmentYear",
"total_rooms": "result/totalRooms",
"use_code": "result/useCode",
"year_built": "result/yearBuilt",
"zestimate_amount": "result/zestimate/amount",
"zestimate_last_updated": "result/zestimate/last-updated",
"zestimate_percentile": "result/zestimate/percentile",
"zestimate_valuation_range_high": "result/zestimate/valuationRange/high",
"zestimate_valuation_range_low": "result/zestimate/valuationRange/low",
"zestimate_value_change": "result/zestimate/valueChange",
"zillow_id": "result/zpid",
"zipcode": "result/address/zipcode",
}
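    # The values above are ElementTree paths evaluated relative to the
    # 'response/results' element stored in self.data, so for example
    # self.data.find('result/address/city').text yields the city of the first
    # result returned by the API.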
def __init__(self, data, *args, **kwargs):
"""Constructor method
"""
self.data = data.findall("response/results")[0]
        for attr in self.attribute_mapping:
try:
self.__setattr__(attr, self.get_attr(attr))
except AttributeError:
print("AttributeError with {}".format(attr))
@property
def region_name(self):
"""
region name
"""
try:
return self.data.find("result/localRealEstate/region").attrib["name"]
except AttributeError:
return None
@property
def region_id(self):
"""
region id
"""
try:
return self.data.find("result/localRealEstate/region").attrib["id"]
except AttributeError:
return None
@property
def region_type(self):
"""
region type
"""
try:
return self.data.find("result/localRealEstate/region").attrib["type"]
except AttributeError:
return None
class GetUpdatedPropertyDetails(ZillowResults):
"""Maps results from the XML data array into attributes of an instance of GetUpdatedPropertyDetails.
An instance of ``GetUpdatedPropertyDetails`` has the following attributes:
``.agent_name``
``.agent_profile_url``
``.appliances``
``.basement``
``.bathrooms``
``.bedrooms``
``.brokerage``
``.city``
``.cooling_system``
``.elementary_school``
``.exterior_material``
``.floor_material``
``.heating_sources``
``.heating_system``
``.high_school``
``.home_description``
``.home_detail_link``
``.home_info``
``.home_size``
``.home_type``
``.latitude``
``.longitude``
``.middle_school``
``.neighborhood``
``.num_floors``
``.num_rooms``
``.page_view_count_this_month``
``.page_view_count_total``
``.parking_type``
``.photo_gallery``
``.posting_agent``
``.posting_last_update``
``.posting_mls``
``.posting_status``
``.posting_type``
``.price``
``.property_size``
``.roof``
``.rooms``
``.school_district``
``.state``
``.street``
``.view``
``.year_built``
``.year_updated``
``.zillow_id``
``.zipcode``
"""
attribute_mapping = {
# attributes in common with GetDeepSearchResults
"bathrooms": "editedFacts/bathrooms",
"bedrooms": "editedFacts/bedrooms",
"city": "result/address/city",
"home_detail_link": "links/homeDetails",
"home_size": "editedFacts/finishedSqFt",
"home_type": "editedFacts/useCode",
"latitude": "address/latitude",
"longitude": "address/longitude",
"property_size": "editedFacts/lotSizeSqFt",
"state": "result/address/state",
"street": "result/address/street",
"year_built": "editedFacts/yearBuilt",
"zillow_id": "zpid",
"zipcode": "result/address/zipcode",
# new attributes in GetUpdatedPropertyDetails
"agent_name": "posting/agentName",
"agent_profile_url": "posting/agentProfileUrl",
"appliances": "editedFacts/appliances",
"basement": "editedFacts/basement",
"brokerage": "posting/brokerage",
"cooling_system": "editedFacts/coolingSystem",
"elementary_school": "elementarySchool",
"exterior_material": "editedFacts/exteriorMaterial",
"floor_material": "editedFacts/floorCovering",
"heating_sources": "editedFacts/heatingSources",
"heating_system": "editedFacts/heatingSystem",
"high_school": "highSchool",
"home_description": "homeDescription",
"home_info": "links/homeInfo",
"middle_school": "middleSchool",
"neighborhood": "neighborhood",
"num_floors": "editedFacts/numFloors",
"num_rooms": "editedFacts/numRooms",
"page_view_count_this_month": "pageViewCount/currentMonth",
"page_view_count_total": "pageViewCount/total",
"parking_type": "editedFacts/parkingType",
"photo_gallery": "links/photoGallery",
"photo_gallery": "links/photoGallery",
"posting_agent": "posting/agentName",
"posting_last_update": "posting/lastUpdatedDate",
"posting_mls": "posting/mls",
"posting_status": "posting/status",
"posting_type": "posting/type",
"price": "price",
"roof": "editedFacts/roof",
"rooms": "editedFacts/rooms",
"school_district": "schoolDistrict",
"view": "editedFacts/view",
"year_updated": "editedFacts/yearUpdated",
}
def __init__(self, data, *args, **kwargs):
"""Constructor method
"""
self.data = data.findall("response")[0]
        for attr in self.attribute_mapping:
try:
self.__setattr__(attr, self.get_attr(attr))
except AttributeError:
print("AttributeError with {}".format(attr))
|
|
import datetime
from typing import List
from flask import abort, render_template, request, url_for
from flask_restx import Namespace, Resource
from sqlalchemy import func as sa_func
from sqlalchemy.sql import and_, false, true
from CTFd.api.v1.helpers.request import validate_args
from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
from CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse
from CTFd.cache import clear_standings
from CTFd.constants import RawEnum
from CTFd.models import ChallengeFiles as ChallengeFilesModel
from CTFd.models import Challenges
from CTFd.models import ChallengeTopics as ChallengeTopicsModel
from CTFd.models import Fails, Flags, Hints, HintUnlocks, Solves, Submissions, Tags, db
from CTFd.plugins.challenges import CHALLENGE_CLASSES, get_chal_class
from CTFd.schemas.challenges import ChallengeSchema
from CTFd.schemas.flags import FlagSchema
from CTFd.schemas.hints import HintSchema
from CTFd.schemas.tags import TagSchema
from CTFd.utils import config, get_config
from CTFd.utils import user as current_user
from CTFd.utils.config.visibility import (
accounts_visible,
challenges_visible,
scores_visible,
)
from CTFd.utils.dates import ctf_ended, ctf_paused, ctftime, isoformat, unix_time_to_utc
from CTFd.utils.decorators import (
admins_only,
during_ctf_time_only,
require_verified_emails,
)
from CTFd.utils.decorators.visibility import (
check_challenge_visibility,
check_score_visibility,
)
from CTFd.utils.helpers.models import build_model_filters
from CTFd.utils.logging import log
from CTFd.utils.modes import generate_account_url, get_model
from CTFd.utils.security.signing import serialize
from CTFd.utils.user import (
authed,
get_current_team,
get_current_team_attrs,
get_current_user,
get_current_user_attrs,
is_admin,
)
challenges_namespace = Namespace(
"challenges", description="Endpoint to retrieve Challenges"
)
ChallengeModel = sqlalchemy_to_pydantic(
Challenges, include={"solves": int, "solved_by_me": bool}
)
TransientChallengeModel = sqlalchemy_to_pydantic(Challenges, exclude=["id"])
class ChallengeDetailedSuccessResponse(APIDetailedSuccessResponse):
data: ChallengeModel
class ChallengeListSuccessResponse(APIListSuccessResponse):
data: List[ChallengeModel]
challenges_namespace.schema_model(
"ChallengeDetailedSuccessResponse", ChallengeDetailedSuccessResponse.apidoc()
)
challenges_namespace.schema_model(
"ChallengeListSuccessResponse", ChallengeListSuccessResponse.apidoc()
)
def _build_solves_query(extra_filters=(), admin_view=False):
"""Returns queries and data that that are used for showing an account's solves.
It returns a tuple of
- SQLAlchemy query with (challenge_id, solve_count_for_challenge_id)
- Current user's solved challenge IDs
"""
# This can return None (unauth) if visibility is set to public
user = get_current_user()
# We only set a condition for matching user solves if there is a user and
# they have an account ID (user mode or in a team in teams mode)
AccountModel = get_model()
if user is not None and user.account_id is not None:
user_solved_cond = Solves.account_id == user.account_id
else:
user_solved_cond = false()
# We have to filter solves to exclude any made after the current freeze
# time unless we're in an admin view as determined by the caller.
freeze = get_config("freeze")
if freeze and not admin_view:
freeze_cond = Solves.date < unix_time_to_utc(freeze)
else:
freeze_cond = true()
# Finally, we never count solves made by hidden or banned users/teams, even
# if we are an admin. This is to match the challenge detail API.
exclude_solves_cond = and_(
AccountModel.banned == false(), AccountModel.hidden == false(),
)
# This query counts the number of solves per challenge, as well as the sum
# of correct solves made by the current user per the condition above (which
# should probably only be 0 or 1!)
solves_q = (
db.session.query(Solves.challenge_id, sa_func.count(Solves.challenge_id),)
.join(AccountModel)
.filter(*extra_filters, freeze_cond, exclude_solves_cond)
.group_by(Solves.challenge_id)
)
    # Also gather the user's own solves, which can differ from the query above.
    # For example, a hidden user should still see which challenges they have
    # solved, even though hidden users are excluded from the counts above.
if admin_view:
# If we're an admin we should show all challenges as solved to break through any requirements
challenges = Challenges.query.all()
solve_ids = {challenge.id for challenge in challenges}
else:
# If not an admin we calculate solves as normal
solve_ids = (
Solves.query.with_entities(Solves.challenge_id)
.filter(user_solved_cond)
.all()
)
solve_ids = {value for value, in solve_ids}
return solves_q, solve_ids
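# Illustrative sketch of how the query above is consumed (names mirror the
# handlers further down in this file): iterating the returned query yields
# (challenge_id, solve_count) pairs, while the returned set is used for
# membership tests such as `challenge.id in user_solves` and for the
# prerequisite superset checks.
#
#   solves_q, user_solves = _build_solves_query(admin_view=False)
#   solve_counts = {chal_id: count for chal_id, count in solves_q}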
@challenges_namespace.route("")
class ChallengeList(Resource):
@check_challenge_visibility
@during_ctf_time_only
@require_verified_emails
@challenges_namespace.doc(
description="Endpoint to get Challenge objects in bulk",
responses={
200: ("Success", "ChallengeListSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
@validate_args(
{
"name": (str, None),
"max_attempts": (int, None),
"value": (int, None),
"category": (str, None),
"type": (str, None),
"state": (str, None),
"q": (str, None),
"field": (
RawEnum(
"ChallengeFields",
{
"name": "name",
"description": "description",
"category": "category",
"type": "type",
"state": "state",
},
),
None,
),
},
location="query",
)
def get(self, query_args):
# Require a team if in teams mode
        # TODO: Convert this into a re-usable decorator
        # TODO: The require_team decorator doesn't work because of no admin passthrough
if get_current_user_attrs():
if is_admin():
pass
else:
if config.is_teams_mode() and get_current_team_attrs() is None:
abort(403)
# Build filtering queries
q = query_args.pop("q", None)
field = str(query_args.pop("field", None))
filters = build_model_filters(model=Challenges, query=q, field=field)
# Admins get a shortcut to see all challenges despite pre-requisites
admin_view = is_admin() and request.args.get("view") == "admin"
solve_counts = {}
        # Build a query to show challenge solve information. We only
# give an admin view if the request argument has been provided.
#
        # NOTE: This is different behaviour from the challenge detail
        # endpoint, which only needs the current user to be an admin rather
        # than also having to provide `view=admin` as a query arg.
solves_q, user_solves = _build_solves_query(admin_view=admin_view)
# Aggregate the query results into the hashes defined at the top of
# this block for later use
for chal_id, solve_count in solves_q:
solve_counts[chal_id] = solve_count
if scores_visible() and accounts_visible():
solve_count_dfl = 0
else:
# Empty out the solves_count if we're hiding scores/accounts
solve_counts = {}
# This is necessary to match the challenge detail API which returns
            # `None` for the solve count if visibility checks fail
solve_count_dfl = None
# Build the query for the challenges which may be listed
chal_q = Challenges.query
# Admins can see hidden and locked challenges in the admin view
if admin_view is False:
chal_q = chal_q.filter(
and_(Challenges.state != "hidden", Challenges.state != "locked")
)
chal_q = (
chal_q.filter_by(**query_args)
.filter(*filters)
.order_by(Challenges.value, Challenges.id)
)
# Iterate through the list of challenges, adding to the object which
# will be JSONified back to the client
response = []
tag_schema = TagSchema(view="user", many=True)
# Gather all challenge IDs so that we can determine invalid challenge prereqs
all_challenge_ids = {
c.id for c in Challenges.query.with_entities(Challenges.id).all()
}
for challenge in chal_q:
if challenge.requirements:
requirements = challenge.requirements.get("prerequisites", [])
anonymize = challenge.requirements.get("anonymize")
prereqs = set(requirements).intersection(all_challenge_ids)
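                # `>=` on sets is a superset test: the challenge is shown in
                # full only when every prerequisite id (e.g. prereqs={1, 2}) is
                # contained in the user's solved ids (e.g. user_solves={1, 2, 5}).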
if user_solves >= prereqs or admin_view:
pass
else:
if anonymize:
response.append(
{
"id": challenge.id,
"type": "hidden",
"name": "???",
"value": 0,
"solves": None,
"solved_by_me": False,
"category": "???",
"tags": [],
"template": "",
"script": "",
}
)
# Fallthrough to continue
continue
try:
challenge_type = get_chal_class(challenge.type)
except KeyError:
# Challenge type does not exist. Fall through to next challenge.
continue
# Challenge passes all checks, add it to response
response.append(
{
"id": challenge.id,
"type": challenge_type.name,
"name": challenge.name,
"value": challenge.value,
"solves": solve_counts.get(challenge.id, solve_count_dfl),
"solved_by_me": challenge.id in user_solves,
"category": challenge.category,
"tags": tag_schema.dump(challenge.tags).data,
"template": challenge_type.templates["view"],
"script": challenge_type.scripts["view"],
}
)
db.session.close()
return {"success": True, "data": response}
@admins_only
@challenges_namespace.doc(
description="Endpoint to create a Challenge object",
responses={
200: ("Success", "ChallengeDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def post(self):
data = request.form or request.get_json()
# Load data through schema for validation but not for insertion
schema = ChallengeSchema()
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
challenge_type = data["type"]
challenge_class = get_chal_class(challenge_type)
challenge = challenge_class.create(request)
response = challenge_class.read(challenge)
return {"success": True, "data": response}
@challenges_namespace.route("/types")
class ChallengeTypes(Resource):
@admins_only
def get(self):
response = {}
for class_id in CHALLENGE_CLASSES:
challenge_class = CHALLENGE_CLASSES.get(class_id)
response[challenge_class.id] = {
"id": challenge_class.id,
"name": challenge_class.name,
"templates": challenge_class.templates,
"scripts": challenge_class.scripts,
"create": render_template(
challenge_class.templates["create"].lstrip("/")
),
}
return {"success": True, "data": response}
@challenges_namespace.route("/<challenge_id>")
class Challenge(Resource):
@check_challenge_visibility
@during_ctf_time_only
@require_verified_emails
@challenges_namespace.doc(
description="Endpoint to get a specific Challenge object",
responses={
200: ("Success", "ChallengeDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def get(self, challenge_id):
if is_admin():
chal = Challenges.query.filter(Challenges.id == challenge_id).first_or_404()
else:
chal = Challenges.query.filter(
Challenges.id == challenge_id,
and_(Challenges.state != "hidden", Challenges.state != "locked"),
).first_or_404()
try:
chal_class = get_chal_class(chal.type)
except KeyError:
abort(
500,
f"The underlying challenge type ({chal.type}) is not installed. This challenge can not be loaded.",
)
if chal.requirements:
requirements = chal.requirements.get("prerequisites", [])
anonymize = chal.requirements.get("anonymize")
# Gather all challenge IDs so that we can determine invalid challenge prereqs
all_challenge_ids = {
c.id for c in Challenges.query.with_entities(Challenges.id).all()
}
if challenges_visible():
user = get_current_user()
if user:
solve_ids = (
Solves.query.with_entities(Solves.challenge_id)
.filter_by(account_id=user.account_id)
.order_by(Solves.challenge_id.asc())
.all()
)
else:
# We need to handle the case where a user is viewing challenges anonymously
solve_ids = []
solve_ids = {value for value, in solve_ids}
prereqs = set(requirements).intersection(all_challenge_ids)
if solve_ids >= prereqs or is_admin():
pass
else:
if anonymize:
return {
"success": True,
"data": {
"id": chal.id,
"type": "hidden",
"name": "???",
"value": 0,
"solves": None,
"solved_by_me": False,
"category": "???",
"tags": [],
"template": "",
"script": "",
},
}
abort(403)
else:
abort(403)
tags = [
tag["value"] for tag in TagSchema("user", many=True).dump(chal.tags).data
]
unlocked_hints = set()
hints = []
if authed():
user = get_current_user()
team = get_current_team()
            # TODO: Convert this into a re-usable decorator
if is_admin():
pass
else:
if config.is_teams_mode() and team is None:
abort(403)
unlocked_hints = {
u.target
for u in HintUnlocks.query.filter_by(
type="hints", account_id=user.account_id
)
}
files = []
for f in chal.files:
token = {
"user_id": user.id,
"team_id": team.id if team else None,
"file_id": f.id,
}
files.append(
url_for("views.files", path=f.location, token=serialize(token))
)
else:
files = [url_for("views.files", path=f.location) for f in chal.files]
for hint in Hints.query.filter_by(challenge_id=chal.id).all():
if hint.id in unlocked_hints or ctf_ended():
hints.append(
{"id": hint.id, "cost": hint.cost, "content": hint.content}
)
else:
hints.append({"id": hint.id, "cost": hint.cost})
response = chal_class.read(challenge=chal)
solves_q, user_solves = _build_solves_query(
extra_filters=(Solves.challenge_id == chal.id,)
)
# If there are no solves for this challenge ID then we have 0 rows
maybe_row = solves_q.first()
if maybe_row:
challenge_id, solve_count = maybe_row
solved_by_user = challenge_id in user_solves
else:
solve_count, solved_by_user = 0, False
# Hide solve counts if we are hiding solves/accounts
if scores_visible() is False or accounts_visible() is False:
solve_count = None
if authed():
# Get current attempts for the user
attempts = Submissions.query.filter_by(
account_id=user.account_id, challenge_id=challenge_id
).count()
else:
attempts = 0
response["solves"] = solve_count
response["solved_by_me"] = solved_by_user
response["attempts"] = attempts
response["files"] = files
response["tags"] = tags
response["hints"] = hints
response["view"] = render_template(
chal_class.templates["view"].lstrip("/"),
solves=solve_count,
solved_by_me=solved_by_user,
files=files,
tags=tags,
hints=[Hints(**h) for h in hints],
max_attempts=chal.max_attempts,
attempts=attempts,
challenge=chal,
)
db.session.close()
return {"success": True, "data": response}
@admins_only
@challenges_namespace.doc(
description="Endpoint to edit a specific Challenge object",
responses={
200: ("Success", "ChallengeDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def patch(self, challenge_id):
data = request.get_json()
# Load data through schema for validation but not for insertion
schema = ChallengeSchema()
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
challenge_class = get_chal_class(challenge.type)
challenge = challenge_class.update(challenge, request)
response = challenge_class.read(challenge)
return {"success": True, "data": response}
@admins_only
@challenges_namespace.doc(
description="Endpoint to delete a specific Challenge object",
responses={200: ("Success", "APISimpleSuccessResponse")},
)
def delete(self, challenge_id):
challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
chal_class = get_chal_class(challenge.type)
chal_class.delete(challenge)
return {"success": True}
@challenges_namespace.route("/attempt")
class ChallengeAttempt(Resource):
@check_challenge_visibility
@during_ctf_time_only
@require_verified_emails
def post(self):
if authed() is False:
return {"success": True, "data": {"status": "authentication_required"}}, 403
if request.content_type != "application/json":
request_data = request.form
else:
request_data = request.get_json()
challenge_id = request_data.get("challenge_id")
if current_user.is_admin():
preview = request.args.get("preview", False)
if preview:
challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
chal_class = get_chal_class(challenge.type)
status, message = chal_class.attempt(challenge, request)
return {
"success": True,
"data": {
"status": "correct" if status else "incorrect",
"message": message,
},
}
if ctf_paused():
return (
{
"success": True,
"data": {
"status": "paused",
"message": "{} is paused".format(config.ctf_name()),
},
},
403,
)
user = get_current_user()
team = get_current_team()
        # TODO: Convert this into a re-usable decorator
if config.is_teams_mode() and team is None:
abort(403)
fails = Fails.query.filter_by(
account_id=user.account_id, challenge_id=challenge_id
).count()
challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
if challenge.state == "hidden":
abort(404)
if challenge.state == "locked":
abort(403)
if challenge.requirements:
requirements = challenge.requirements.get("prerequisites", [])
solve_ids = (
Solves.query.with_entities(Solves.challenge_id)
.filter_by(account_id=user.account_id)
.order_by(Solves.challenge_id.asc())
.all()
)
solve_ids = {solve_id for solve_id, in solve_ids}
# Gather all challenge IDs so that we can determine invalid challenge prereqs
all_challenge_ids = {
c.id for c in Challenges.query.with_entities(Challenges.id).all()
}
prereqs = set(requirements).intersection(all_challenge_ids)
if solve_ids >= prereqs:
pass
else:
abort(403)
chal_class = get_chal_class(challenge.type)
# Anti-bruteforce / submitting Flags too quickly
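# kpm = this account's incorrect submissions over the last minute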
kpm = current_user.get_wrong_submissions_per_minute(user.account_id)
kpm_limit = int(get_config("incorrect_submissions_per_min", default=10))
if kpm > kpm_limit:
if ctftime():
chal_class.fail(
user=user, team=team, challenge=challenge, request=request
)
log(
"submissions",
"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [TOO FAST]",
name=user.name,
submission=request_data.get("submission", "").encode("utf-8"),
challenge_id=challenge_id,
kpm=kpm,
)
# Submitting too fast
return (
{
"success": True,
"data": {
"status": "ratelimited",
"message": "You're submitting flags too fast. Slow down.",
},
},
429,
)
solves = Solves.query.filter_by(
account_id=user.account_id, challenge_id=challenge_id
).first()
# Challenge not solved yet
if not solves:
# Hit max attempts
max_tries = challenge.max_attempts
if max_tries and fails >= max_tries > 0:
return (
{
"success": True,
"data": {
"status": "incorrect",
"message": "You have 0 tries remaining",
},
},
403,
)
status, message = chal_class.attempt(challenge, request)
if status: # The challenge plugin says the input is right
if ctftime() or current_user.is_admin():
chal_class.solve(
user=user, team=team, challenge=challenge, request=request
)
clear_standings()
log(
"submissions",
"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [CORRECT]",
name=user.name,
submission=request_data.get("submission", "").encode("utf-8"),
challenge_id=challenge_id,
kpm=kpm,
)
return {
"success": True,
"data": {"status": "correct", "message": message},
}
else: # The challenge plugin says the input is wrong
if ctftime() or current_user.is_admin():
chal_class.fail(
user=user, team=team, challenge=challenge, request=request
)
clear_standings()
log(
"submissions",
"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [WRONG]",
name=user.name,
submission=request_data.get("submission", "").encode("utf-8"),
challenge_id=challenge_id,
kpm=kpm,
)
if max_tries:
# Off by one because this failed attempt was recorded after 'fails' was queried
attempts_left = max_tries - fails - 1
tries_str = "tries"
if attempts_left == 1:
tries_str = "try"
# Add a punctuation mark if there isn't one
if message[-1] not in "!().;?[]{}":
message = message + "."
return {
"success": True,
"data": {
"status": "incorrect",
"message": "{} You have {} {} remaining.".format(
message, attempts_left, tries_str
),
},
}
else:
return {
"success": True,
"data": {"status": "incorrect", "message": message},
}
# Challenge already solved
else:
log(
"submissions",
"[{date}] {name} submitted {submission} on {challenge_id} with kpm {kpm} [ALREADY SOLVED]",
name=user.name,
submission=request_data.get("submission", "").encode("utf-8"),
challenge_id=challenge_id,
kpm=kpm,
)
return {
"success": True,
"data": {
"status": "already_solved",
"message": "You already solved this",
},
}
@challenges_namespace.route("/<challenge_id>/solves")
class ChallengeSolves(Resource):
@check_challenge_visibility
@check_score_visibility
@during_ctf_time_only
@require_verified_emails
def get(self, challenge_id):
response = []
challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
# TODO: Need a generic challenge visibility call.
# However, it should be stated that a solve on a gated challenge is not considered private.
if challenge.state == "hidden" and is_admin() is False:
abort(404)
Model = get_model()
# Note that we specifically query for the Solves.account.name
# attribute here because it is faster than having SQLAlchemy
# query for the attribute directly and it's unknown what the
# effects of changing the relationship lazy attribute would be
solves = (
Solves.query.add_columns(Model.name.label("account_name"))
.join(Model, Solves.account_id == Model.id)
.filter(
Solves.challenge_id == challenge_id,
Model.banned == False,
Model.hidden == False,
)
.order_by(Solves.date.asc())
)
freeze = get_config("freeze")
if freeze:
preview = request.args.get("preview")
if (is_admin() is False) or (is_admin() is True and preview):
dt = datetime.datetime.utcfromtimestamp(freeze)
solves = solves.filter(Solves.date < dt)
for solve in solves:
# Separate out the account name and the Solve object from the SQLAlchemy tuple
solve, account_name = solve
response.append(
{
"account_id": solve.account_id,
"name": account_name,
"date": isoformat(solve.date),
"account_url": generate_account_url(account_id=solve.account_id),
}
)
return {"success": True, "data": response}
@challenges_namespace.route("/<challenge_id>/files")
class ChallengeFiles(Resource):
@admins_only
def get(self, challenge_id):
response = []
challenge_files = ChallengeFilesModel.query.filter_by(
challenge_id=challenge_id
).all()
for f in challenge_files:
response.append({"id": f.id, "type": f.type, "location": f.location})
return {"success": True, "data": response}
@challenges_namespace.route("/<challenge_id>/tags")
class ChallengeTags(Resource):
@admins_only
def get(self, challenge_id):
response = []
tags = Tags.query.filter_by(challenge_id=challenge_id).all()
for t in tags:
response.append(
{"id": t.id, "challenge_id": t.challenge_id, "value": t.value}
)
return {"success": True, "data": response}
@challenges_namespace.route("/<challenge_id>/topics")
class ChallengeTopics(Resource):
@admins_only
def get(self, challenge_id):
response = []
topics = ChallengeTopicsModel.query.filter_by(challenge_id=challenge_id).all()
for t in topics:
response.append(
{
"id": t.id,
"challenge_id": t.challenge_id,
"topic_id": t.topic_id,
"value": t.topic.value,
}
)
return {"success": True, "data": response}
@challenges_namespace.route("/<challenge_id>/hints")
class ChallengeHints(Resource):
@admins_only
def get(self, challenge_id):
hints = Hints.query.filter_by(challenge_id=challenge_id).all()
schema = HintSchema(many=True)
response = schema.dump(hints)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@challenges_namespace.route("/<challenge_id>/flags")
class ChallengeFlags(Resource):
@admins_only
def get(self, challenge_id):
flags = Flags.query.filter_by(challenge_id=challenge_id).all()
schema = FlagSchema(many=True)
response = schema.dump(flags)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@challenges_namespace.route("/<challenge_id>/requirements")
class ChallengeRequirements(Resource):
@admins_only
def get(self, challenge_id):
challenge = Challenges.query.filter_by(id=challenge_id).first_or_404()
return {"success": True, "data": challenge.requirements}
|
|
import chainer
import chainerx
import numpy
from chainerx_tests import math_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'shape', [
(2, 2),
(3, 3, 3),
(5, 5, 5),
(4, 1, 2, 4)
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
])
))
class TestErf(op_utils.ChainerOpTest):
dodge_nondifferentiable = True
def setup(self, float_dtype):
dtype = float_dtype
if dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
self.dtype = dtype
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
x = numpy.random.normal(-1, 1, shape).astype(dtype)
return x,
def forward_chainerx(self, inputs):
x, = inputs
y = chainerx.erf(x)
return y,
def forward_chainer(self, inputs):
x, = inputs
y = chainer.functions.erf(x)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.exp(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExpm1(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.expm1(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExp2(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.exp2(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog10(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log10(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog2(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log2(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog1p(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log1p(a)
|
|
# Copyright 2015-2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.pytree_unwrapper."""
import sys
import textwrap
import unittest
from yapf.yapflib import comment_splicer
from yapf.yapflib import pytree_unwrapper
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
class PytreeUnwrapperTest(unittest.TestCase):
def _ParseAndUnwrap(self, code, dumptree=False):
"""Produces unwrapped lines from the given code.
Parses the code into a tree, performs comment splicing and runs the
unwrapper.
Arguments:
code: code to parse as a string
dumptree: if True, the parsed pytree (after comment splicing) is dumped
to stderr. Useful for debugging.
Returns:
List of unwrapped lines.
"""
tree = pytree_utils.ParseCodeToTree(code)
comment_splicer.SpliceComments(tree)
if dumptree:
pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)
return pytree_unwrapper.UnwrapPyTree(tree)
def _CheckUnwrappedLines(self, uwlines, list_of_expected):
"""Check that the given UnwrappedLines match expectations.
Args:
uwlines: list of UnwrappedLine
list_of_expected: list of (depth, values) pairs. Non-semantic tokens are
filtered out from the expected values.
"""
actual = []
for uwl in uwlines:
filtered_values = [ft.value for ft in uwl.tokens
if ft.name not in pytree_utils.NONSEMANTIC_TOKENS]
actual.append((uwl.depth, filtered_values))
self.assertEqual(list_of_expected, actual)
def testSimpleFileScope(self):
code = textwrap.dedent(r"""
x = 1
# a comment
y = 2
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['x', '=', '1']),
(0, ['# a comment']),
(0, ['y', '=', '2'])]) # yapf: disable
def testSimpleMultilineStatement(self):
code = textwrap.dedent(r"""
y = (1 +
x)
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['y', '=', '(', '1', '+', 'x', ')'])]) # yapf: disable
def testFileScopeWithInlineComment(self):
code = textwrap.dedent(r"""
x = 1 # a comment
y = 2
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['x', '=', '1', '# a comment']),
(0, ['y', '=', '2'])]) # yapf: disable
def testSimpleIf(self):
code = textwrap.dedent(r"""
if foo:
x = 1
y = 2
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'foo', ':']),
(1, ['x', '=', '1']),
(1, ['y', '=', '2'])]) # yapf: disable
def testSimpleIfWithComments(self):
code = textwrap.dedent(r"""
# c1
if foo: # c2
x = 1
y = 2
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['# c1']),
(0, ['if', 'foo', ':', '# c2']),
(1, ['x', '=', '1']),
(1, ['y', '=', '2'])]) # yapf: disable
def testIfWithCommentsInside(self):
code = textwrap.dedent(r"""
if foo:
# c1
x = 1 # c2
# c3
y = 2
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'foo', ':']),
(1, ['# c1']),
(1, ['x', '=', '1', '# c2']),
(1, ['# c3']),
(1, ['y', '=', '2'])]) # yapf: disable
def testIfElifElse(self):
code = textwrap.dedent(r"""
if x:
x = 1 # c1
elif y: # c2
y = 1
else:
# c3
z = 1
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'x', ':']),
(1, ['x', '=', '1', '# c1']),
(0, ['elif', 'y', ':', '# c2']),
(1, ['y', '=', '1']),
(0, ['else', ':']),
(1, ['# c3']),
(1, ['z', '=', '1'])]) # yapf: disable
def testNestedCompoundTwoLevel(self):
code = textwrap.dedent(r"""
if x:
x = 1 # c1
while t:
# c2
j = 1
k = 1
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['if', 'x', ':']),
(1, ['x', '=', '1', '# c1']),
(1, ['while', 't', ':']),
(2, ['# c2']),
(2, ['j', '=', '1']),
(1, ['k', '=', '1'])]) # yapf: disable
def testSimpleWhile(self):
code = textwrap.dedent(r"""
while x > 1: # c1
# c2
x = 1
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['while', 'x', '>', '1', ':', '# c1']),
(1, ['# c2']),
(1, ['x', '=', '1'])]) # yapf: disable
def testSimpleTry(self):
code = textwrap.dedent(r"""
try:
pass
except:
pass
except:
pass
else:
pass
finally:
pass
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['try', ':']),
(1, ['pass']),
(0, ['except', ':']),
(1, ['pass']),
(0, ['except', ':']),
(1, ['pass']),
(0, ['else', ':']),
(1, ['pass']),
(0, ['finally', ':']),
(1, ['pass'])]) # yapf: disable
def testSimpleFuncdef(self):
code = textwrap.dedent(r"""
def foo(x): # c1
# c2
return x
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['def', 'foo', '(', 'x', ')', ':', '# c1']),
(1, ['# c2']),
(1, ['return', 'x'])]) # yapf: disable
def testTwoFuncDefs(self):
code = textwrap.dedent(r"""
def foo(x): # c1
# c2
return x
def bar(): # c3
# c4
return x
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['def', 'foo', '(', 'x', ')', ':', '# c1']),
(1, ['# c2']),
(1, ['return', 'x']),
(0, ['def', 'bar', '(', ')', ':', '# c3']),
(1, ['# c4']),
(1, ['return', 'x'])]) # yapf: disable
def testSimpleClassDef(self):
code = textwrap.dedent(r"""
class Klass: # c1
# c2
p = 1
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['class', 'Klass', ':', '# c1']),
(1, ['# c2']),
(1, ['p', '=', '1'])]) # yapf: disable
def testSingleLineStmtInFunc(self):
code = textwrap.dedent(r"""
def f(): return 37
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['def', 'f', '(', ')', ':']),
(1, ['return', '37'])]) # yapf: disable
def testMultipleComments(self):
code = textwrap.dedent(r"""
# Comment #1
# Comment #2
def f():
pass
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['# Comment #1']),
(0, ['# Comment #2']),
(0, ['def', 'f', '(', ')', ':']),
(1, ['pass'])]) # yapf: disable
def testSplitListWithComment(self):
code = textwrap.dedent(r"""
a = [
'a',
'b',
'c', # hello world
]
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckUnwrappedLines(uwlines, [
(0, ['a', '=', '[', "'a'", ',', "'b'", ',',
"'c'", ',', '# hello world', ']'])]) # yapf: disable
class MatchBracketsTest(unittest.TestCase):
def _ParseAndUnwrap(self, code, dumptree=False):
"""Produces unwrapped lines from the given code.
Parses the code into a tree, matches brackets, and runs the unwrapper.
Arguments:
code: code to parse as a string
dumptree: if True, the parsed pytree (after comment splicing) is dumped to
stderr. Useful for debugging.
Returns:
List of unwrapped lines.
"""
tree = pytree_utils.ParseCodeToTree(code)
comment_splicer.SpliceComments(tree)
if dumptree:
pytree_visitor.DumpPyTree(tree, target_stream=sys.stderr)
return pytree_unwrapper.UnwrapPyTree(tree)
def _CheckMatchingBrackets(self, uwlines, list_of_expected):
"""Check that the tokens have the expected matching bracket.
Arguments:
uwlines: list of UnwrappedLine.
list_of_expected: list of (index, index) pairs. The matching brackets at
the indexes need to match. Non-semantic tokens are filtered out from the
expected values.
"""
actual = []
for uwl in uwlines:
filtered_values = [(ft, ft.matching_bracket) for ft in uwl.tokens
if ft.name not in pytree_utils.NONSEMANTIC_TOKENS]
if filtered_values:
actual.append(filtered_values)
for index, bracket_list in enumerate(list_of_expected):
uwline = actual[index]
if not bracket_list:
for value in uwline:
self.assertIsNone(value[1])
else:
for open_bracket, close_bracket in bracket_list:
self.assertEqual(uwline[open_bracket][0], uwline[close_bracket][1])
self.assertEqual(uwline[close_bracket][0], uwline[open_bracket][1])
def testFunctionDef(self):
code = textwrap.dedent("""\
def foo(a, b={'hello': ['w','d']}, c=[42, 37]):
pass
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckMatchingBrackets(uwlines, [
[(2, 24), (7, 15), (10, 14), (19, 23)],
[]
]) # yapf: disable
def testDecorator(self):
code = textwrap.dedent("""\
@bar()
def foo(a, b, c):
pass
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckMatchingBrackets(uwlines, [
[(2, 3)],
[(2, 8)],
[]
]) # yapf: disable
def testClassDef(self):
code = textwrap.dedent("""\
class A(B, C, D):
pass
""")
uwlines = self._ParseAndUnwrap(code)
self._CheckMatchingBrackets(uwlines, [
[(2, 8)],
[]
]) # yapf: disable
if __name__ == '__main__':
unittest.main()
|
|
import datetime
from flask_admin.babel import lazy_gettext
from flask_admin.model import filters
from .tools import parse_like_term
from mongoengine.queryset import Q
class BaseMongoEngineFilter(filters.BaseFilter):
"""
Base MongoEngine filter.
"""
def __init__(self, column, name, options=None, data_type=None):
"""
Constructor.
:param column:
Model field
:param name:
Display name
:param options:
Fixed set of options. If provided, will use drop down instead of textbox.
:param data_type:
Client data type
"""
super(BaseMongoEngineFilter, self).__init__(name, options, data_type)
self.column = column
# Common filters
class FilterEqual(BaseMongoEngineFilter):
def apply(self, query, value):
flt = {'%s' % self.column.name: value}
return query.filter(**flt)
def operation(self):
return lazy_gettext('equals')
class FilterNotEqual(BaseMongoEngineFilter):
def apply(self, query, value):
flt = {'%s__ne' % self.column.name: value}
return query.filter(**flt)
def operation(self):
return lazy_gettext('not equal')
class FilterLike(BaseMongoEngineFilter):
def apply(self, query, value):
term, data = parse_like_term(value)
flt = {'%s__%s' % (self.column.name, term): data}
return query.filter(**flt)
def operation(self):
return lazy_gettext('contains')
class FilterNotLike(BaseMongoEngineFilter):
def apply(self, query, value):
term, data = parse_like_term(value)
flt = {'%s__not__%s' % (self.column.name, term): data}
return query.filter(**flt)
def operation(self):
return lazy_gettext('not contains')
class FilterGreater(BaseMongoEngineFilter):
def apply(self, query, value):
flt = {'%s__gt' % self.column.name: value}
return query.filter(**flt)
def operation(self):
return lazy_gettext('greater than')
class FilterSmaller(BaseMongoEngineFilter):
def apply(self, query, value):
flt = {'%s__lt' % self.column.name: value}
return query.filter(**flt)
def operation(self):
return lazy_gettext('smaller than')
class FilterEmpty(BaseMongoEngineFilter, filters.BaseBooleanFilter):
def apply(self, query, value):
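# value is the string '1' when the user asked for empty (None) fields; anything else means "not empty"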
if value == '1':
flt = {'%s' % self.column.name: None}
else:
flt = {'%s__ne' % self.column.name: None}
return query.filter(**flt)
def operation(self):
return lazy_gettext('empty')
class FilterInList(BaseMongoEngineFilter):
def __init__(self, column, name, options=None, data_type=None):
super(FilterInList, self).__init__(column, name, options, data_type='select2-tags')
def clean(self, value):
return [v.strip() for v in value.split(',') if v.strip()]
def apply(self, query, value):
flt = {'%s__in' % self.column.name: value}
return query.filter(**flt)
def operation(self):
return lazy_gettext('in list')
class FilterNotInList(FilterInList):
def apply(self, query, value):
flt = {'%s__nin' % self.column.name: value}
return query.filter(**flt)
def operation(self):
return lazy_gettext('not in list')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
def apply(self, query, value):
flt = {'%s' % self.column.name: value == '1'}
return query.filter(**flt)
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
def apply(self, query, value):
flt = {'%s' % self.column.name: value != '1'}
return query.filter(**flt)
class IntEqualFilter(FilterEqual, filters.BaseIntFilter):
pass
class IntNotEqualFilter(FilterNotEqual, filters.BaseIntFilter):
pass
class IntGreaterFilter(FilterGreater, filters.BaseIntFilter):
pass
class IntSmallerFilter(FilterSmaller, filters.BaseIntFilter):
pass
class IntInListFilter(filters.BaseIntListFilter, FilterInList):
pass
class IntNotInListFilter(filters.BaseIntListFilter, FilterNotInList):
pass
class FloatEqualFilter(FilterEqual, filters.BaseFloatFilter):
pass
class FloatNotEqualFilter(FilterNotEqual, filters.BaseFloatFilter):
pass
class FloatGreaterFilter(FilterGreater, filters.BaseFloatFilter):
pass
class FloatSmallerFilter(FilterSmaller, filters.BaseFloatFilter):
pass
class FloatInListFilter(filters.BaseFloatListFilter, FilterInList):
pass
class FloatNotInListFilter(filters.BaseFloatListFilter, FilterNotInList):
pass
class DateTimeEqualFilter(FilterEqual, filters.BaseDateTimeFilter):
pass
class DateTimeNotEqualFilter(FilterNotEqual, filters.BaseDateTimeFilter):
pass
class DateTimeGreaterFilter(FilterGreater, filters.BaseDateTimeFilter):
pass
class DateTimeSmallerFilter(FilterSmaller, filters.BaseDateTimeFilter):
pass
class DateTimeBetweenFilter(BaseMongoEngineFilter, filters.BaseDateTimeBetweenFilter):
def __init__(self, column, name, options=None, data_type=None):
super(DateTimeBetweenFilter, self).__init__(column,
name,
options,
data_type='datetimerangepicker')
def apply(self, query, value):
start, end = value
flt = {'%s__gte' % self.column.name: start, '%s__lte' % self.column.name: end}
return query.filter(**flt)
class DateTimeNotBetweenFilter(DateTimeBetweenFilter):
def apply(self, query, value):
start, end = value
return query.filter(Q(**{'%s__not__gte' % self.column.name: start}) |
Q(**{'%s__not__lte' % self.column.name: end}))
def operation(self):
return lazy_gettext('not between')
# Base MongoEngine filter field converter
class FilterConverter(filters.BaseFilterConverter):
strings = (FilterEqual, FilterNotEqual, FilterLike, FilterNotLike,
FilterEmpty, FilterInList, FilterNotInList)
int_filters = (IntEqualFilter, IntNotEqualFilter, IntGreaterFilter,
IntSmallerFilter, FilterEmpty, IntInListFilter,
IntNotInListFilter)
float_filters = (FloatEqualFilter, FloatNotEqualFilter, FloatGreaterFilter,
FloatSmallerFilter, FilterEmpty, FloatInListFilter,
FloatNotInListFilter)
bool_filters = (BooleanEqualFilter, BooleanNotEqualFilter)
datetime_filters = (DateTimeEqualFilter, DateTimeNotEqualFilter,
DateTimeGreaterFilter, DateTimeSmallerFilter,
DateTimeBetweenFilter, DateTimeNotBetweenFilter,
FilterEmpty)
def convert(self, type_name, column, name):
filter_name = type_name.lower()
if filter_name in self.converters:
return self.converters[filter_name](column, name)
return None
@filters.convert('StringField', 'EmailField', 'URLField')
def conv_string(self, column, name):
return [f(column, name) for f in self.strings]
@filters.convert('BooleanField')
def conv_bool(self, column, name):
return [f(column, name) for f in self.bool_filters]
@filters.convert('IntField', 'LongField')
def conv_int(self, column, name):
return [f(column, name) for f in self.int_filters]
@filters.convert('DecimalField', 'FloatField')
def conv_float(self, column, name):
return [f(column, name) for f in self.float_filters]
@filters.convert('DateTimeField', 'ComplexDateTimeField')
def conv_datetime(self, column, name):
return [f(column, name) for f in self.datetime_filters]
|
|
#!/usr/bin/python
import sys
import os
from oauth2client import client
import gflags
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from datetime import datetime, date, time, timedelta
import pytz
from dateutil.parser import parse
import re
import tts,stt
class Calendar:
def __init__(self,client_id = '71057278230-krkhag877g29lng5rp63qg9bhvoellcn.apps.googleusercontent.com'
,client_secret = 'CCaXElmLd89gBUA202L9717t'
,APIkey = 'AIzaSyBg0V344J5CuQP_lsfQcIAx0ajv6BOBTfw'
,calendarId = '[email protected]' ):
# Your OAuth 2.0 Client ID and Secret. If you do not have an ID and Secret yet,
# please go to https://console.developers.google.com and create a set.
self.stt = stt.STTEngine()
self.tts = tts.TTSEngine()
cest = pytz.timezone('Europe/Madrid')
self.now = datetime.now(tz=cest) # timezone?
self.dt = parse(str(self.now))
self.calendarId = calendarId
self.weekDay = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']
# The calendar API OAuth 2.0 scope.
SCOPE = u'https://www.googleapis.com/auth/calendar'
#Retrieve and display the access and refresh token.
flow = client.OAuth2WebServerFlow(
client_id=client_id,
client_secret=client_secret,
scope=[SCOPE],
user_agent='Ads Python Client Library',
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
authorize_url = flow.step1_get_authorize_url()
storage = Storage('calendar.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(flow, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
# Build a service object for interacting with the API. Visit
# the Google Developers Console
# to get a developerKey for your own application.
service = build(serviceName='calendar', version='v3', http=http, developerKey=APIkey)
self.service = service
def getEvents(self, text):
#'List all' my events
#'List' the events of this 'week'
#'List' the events of this 'month'
#'List' the events of "2" 'days'
#'List' the event of "7 of March" at "15:00"
#'List' the event of "this friday"
if "week" in text:
days = 7
elif "month" in text:
days = 30 - int(self.dt.strftime('%d'))
elif "days" in text:
days = int(re.findall("(\d+)",text)[0])
elif "all" in text:
days = 0
else:
days = -1
dat = re.findall("of\s(.+)",text)
#"7 of March" at "15:00"
#"this friday"
if dat:
dat = dat[0]
else:
days = 0
#Get only date and not time
da = re.findall("at\s\d+:\d+", dat)
if da:
dat = re.findall("(.+)\sat",dat)[0]
#"7 of March"
#"this friday"
#deleteDay = -1 -> current day, because no day was set
#deleteDay = 1 -> get the whole of the given day
#deleteDay = 0 -> get an interval of time within the day
#By default, get the whole day
deleteDay = 1
#Check if there is a number or a day of the week
day = re.findall("[0-9]",dat)
if day:
#"7 of March"
dayEvent = int(day[0])
monthRe = re.findall("of\s([a-z]+)",dat)
monthEvent = int(datetime.strptime(monthRe[0], '%B').strftime('%m'))
else:
#"this friday"
#Get the week day
dat = re.split('\s+', dat)
day = set(dat).intersection(self.weekDay)
if day:
day = day.pop()
#Compare the current day of the week with the requested one
posNow = self.weekDay.index( self.dt.strftime('%A').lower())
pos = self.weekDay.index(day)
if (pos - posNow) < 0:
posCount = abs(pos - posNow) + 1
else:
posCount = pos - posNow
dayEvent = int( self.dt.strftime('%d')) + posCount
monthEvent = int( self.dt.strftime('%m'))
else:
#No date set, use the current day
deleteDay = -1
inter = re.findall("of.+",text)[0]
#of "7 of March" at "15:00"
#of "this friday"
#Check if we have time
da = re.findall("at\s\d+:\d+", inter)
if da:
#time set, time not all day
deleteDay = 0
tim = re.findall("at\s(\d+:\d+)",text)[0]
tim = tim.split(':')
hourEvent = int(tim[0])
minEvent = int(tim[1])
if days == -1:
#deleteDay = -1 -> current day, because no day was set
#deleteDay = 1 -> get the whole of the given day
#deleteDay = 0 -> get an interval of time within the day
if deleteDay == -1:
timeMin = datetime(year=self.now.year, month=self.now.month, day=self.now.day, hour=0, minute=0)
timeMax = timeMin + timedelta(minutes=1435)  # 1435 minutes = 23 h 55 min, i.e. (almost) the whole day
timeMin = timeMin.isoformat() + self.dt.strftime('%z')
timeMax = timeMax.isoformat() + self.dt.strftime('%z')
elif deleteDay == 1:
timeMin = datetime(year=self.now.year, month=monthEvent, day=dayEvent, hour=0, minute=0)
timeMax = timeMin + timedelta(minutes=1435)  # 1435 minutes = 23 h 55 min, i.e. (almost) the whole day
timeMin = timeMin.isoformat() + self.dt.strftime('%z')
timeMax = timeMax.isoformat() + self.dt.strftime('%z')
else:
#Get all the events of the day
timeMin = datetime(year=self.now.year, month=monthEvent, day=dayEvent, hour=0, minute=0)
timeMax = timeMin + timedelta(minutes=1435)  # 1435 minutes = 23 h 55 min, i.e. (almost) the whole day
timeMin = timeMin.isoformat() + self.dt.strftime('%z')
timeMax = timeMax.isoformat() + self.dt.strftime('%z')
events = self.service.events().list(calendarId=self.calendarId,timeMin=timeMin, timeMax=timeMax).execute()
#Calculate the start time of the event (minutes)
timeStart = hourEvent*60 + minEvent
count=0
evet = events
#Get every event and check the start time
for event in events['items']:
#'2015-03-08T14:00:00+01:00'
dt = parse(event['start']['dateTime'])
if (int(dt.strftime('%H'))*60+int(dt.strftime('%M'))) == timeStart:
evet['items'][0] = event
else:
#Count of unmatched events
count = count +1
#Remove unmatched events
for i in range(count):
evet['items'].pop()
return evet
events = self.service.events().list(calendarId=self.calendarId,timeMin=timeMin, timeMax=timeMax).execute()
#Get all the events
elif days == 0:
events = self.service.events().list(calendarId=self.calendarId).execute()
#Get the events depending of the days interval
else:
timeMax = datetime(year=self.now.year, month=self.now.month, day=self.now.day, tzinfo=self.now.tzinfo) + timedelta(days=days)  # reuse the timezone set in __init__
timeMax = timeMax.isoformat()
events = self.service.events().list(calendarId=self.calendarId,timeMax=timeMax).execute()
return events
def setEvents(self, text):
#'Add' new event called "asdfgh" 'for' "this tuesday" (not necessary) 'at' "15:00" of "2 hours"
#'Add' new event called "asdsfdg" 'for' "the 7 of march" (not necessary) 'at' "19:00" to "20:00"
summary = re.findall("called\s([a-z,0-9,\s]+)\sfor",text)
if summary:
summary = summary[0]
else:
summary = "event"
dat = re.findall("for\s(.+)",text)[0]
#"7 of March" at "15:00"
#"this friday"
#Get only date and not time
da = re.findall("at\s\d+:\d+", dat)
if da:
dat = re.findall("(.+)\sat",dat)[0]
#"7 of March"
#"this friday"
#Check if there is a number or a day of the week
day = re.findall("[0-9]",dat)
if day:
#"7 of March"
dayEvent = int(day[0])
monthRe = re.findall("of\s([a-z]+)",dat)
monthEvent = int(datetime.strptime(monthRe[0], '%B').strftime('%m'))
else:
#"this friday"
#Get the week day
dat = re.split('\s+', dat)
day = set(dat).intersection(self.weekDay)
if day:
day = day.pop()
#Compare the current day of the week with the requested one
posNow = self.weekDay.index( self.dt.strftime('%A').lower())
pos = self.weekDay.index(day)
if (pos - posNow) < 0:
posCount = abs(pos - posNow) + 1
else:
posCount = pos - posNow
dayEvent = int( self.dt.strftime('%d')) + posCount
monthEvent = int( self.dt.strftime('%m'))
else:
#Use the current date if neither a weekday nor a day number was set
dayEvent = int( self.dt.strftime('%d'))
monthEvent = int( self.dt.strftime('%m'))
inter = re.findall("for.+",text)[0]
#Check if we have time
da = re.findall("at\s\d+:\d+", inter)
if da:
tim = re.findall("at\s(\d+:\d+)",text)[0]
tim = tim.split(':')
hourEvent = int(tim[0])
minEvent = int(tim[1])
#Check if we have interval
inte = re.findall("at\s\d+:\d+(.+)",text)[0]
#Check the type of interval
da = re.findall("of\s\d+\s\D+", dat)
daa = re.findall("\d+:\d+\sto\s\d+:\d+", dat)
if da:
inte = re.split('\s+', inte)
#Check the scale
if "minutes" in inte[3]:
intervalEvent = int(inte[2])
elif "hours" in inte[3]:
intervalEvent = int(inte[2])*60
elif "hour" in inte[3]:
intervalEvent = int(inte[2])*60
else:
return False
elif daa:
inte = re.split('\s+', inte)
interEv = inte[2].split(':')
intervalEvent = (int(interEv[0])*60 + int(interEv[1]))-(hourEvent*60 + minEvent)
if intervalEvent < 0:
return False
else:
#Interval time by default
intervalEvent = 60
else:
#Event by default from 08:00 to 22:00
hourEvent = 8
minEvent = 0
intervalEvent = 14*60
startTime = datetime(year=self.now.year, month=monthEvent, day=dayEvent, hour=hourEvent, minute=minEvent)
endTime = startTime + timedelta(minutes=intervalEvent)
startTime = startTime.isoformat() + self.dt.strftime('%z')
endTime = endTime.isoformat() + self.dt.strftime('%z')
body = {'summary':summary, 'start':{'dateTime': startTime}, 'end':{'dateTime':endTime}}
event = self.service.events().insert(calendarId=self.calendarId, body=body).execute()
return True
def deleteEvents(self, text):
#'Delete all' my events
#'Delete all' the events of this 'week'
#'Delete all' the events of this 'month'
#'Delete the event' of "7 of March" at "15:00"
events = self.getEvents(text)
for event in events['items']:
self.service.events().delete(calendarId=self.calendarId, eventId=event['id']).execute()
#Return True if something was deleted
if events['items']:
return True
else:
return False
def think(self, text):
#'' static words
#"" input words
#general questions
#the 'plans' or 'scheduler' of this 'week' or this 'month'
#Access the 'calendar'
#Assistant: Accessing the calendar, what do you want to do?
#Add:
#'Add' new event 'called' "asdfgh" 'for' "this tuesday" (not necessary) 'at' "15:00" of "2 hours"
#'Add' new event 'called' "asdsfdg" 'for' "the 7 of march" (not necessary) 'at' "19:00" to "20:00"
#List or tell me or get
#'List all' my events
#'List' the events of this 'week'
#'List' the events of this 'month'
#'List' the events of "2" 'days'
#Get all the elements
#Get elements of friday
#Delete: -> Ask to confirm the delete
#'Delete all' my events
#'Delete all' the events of this 'week'
#'Delete all' the events of this 'month'
#'Delete the event' of "7 of March" at "15:00"
self.tts.say("What operation do you want to do?")
print "What operation do you want to do?" + "\n"
os.system("sox -d voice1.flac silence 1 0.1 5% 1 1.0 5%")
text = self.stt.transcript("voice1.flac").lower()
print text
if "add" in text:
if self.setEvents(text):
#Convert text of 'add event' to 'get events'
text = text.replace('for', 'of')
#Delete unnecessary text
text = re.findall("of\s.+",text)[0]
#Get the event to check that it was saved
events = self.getEvents(text)
for event in events['items']:
if event:
dt = parse(event['start']['dateTime'])
dtf = parse(event['end']['dateTime'])
textEvent = "Event called: "+event['summary']+" at "+dt.strftime('%A')+", "+str(dt.day)+" of "+dt.strftime('%B')+" at "+dt.strftime('%H')+":"+dt.strftime('%M')+" to "+dtf.strftime('%H')+":"+dtf.strftime('%M')
textEvent += ". Successfully added"
return textEvent
else:
return "Not added"
else:
return "Incorrect input"
elif "delete" in text:
if self.deleteEvents(text):
return "successfully deteled"
else:
return "Nothing to detele"
else:
textEvent = ""
events = self.getEvents(text)
for event in events['items']:
dt = parse(event['start']['dateTime'])
dtf = parse(event['end']['dateTime'])
textEvent += "Event called: "+event['summary']+" at "+dt.strftime('%A')+", "+str(dt.day)+" of "+dt.strftime('%B')+" at "+dt.strftime('%H')+":"+dt.strftime('%M')+" to "+dtf.strftime('%H')+":"+dtf.strftime('%M') + "\n"
return textEvent
"""
if __name__ == '__main__':
text0 = 'Add new event called prueb1 for this tuesday at 19:00 to 21:00'
text1 = 'Add new event called my lunch for the 10 of march at 19:00 of 1 hours'
text2 = 'Add new event called my lunch for the 15 of march'
text3 = 'Add new event called my lunch for the 12 of april at 20:00 of 30 minutes'
textGet = 'Delete the events of the 4 of april'
textGet1 = 'Get all the events'
textDel = 'Delete the events of this friday'
#text = 'get the events of this friday'
returnedText = sys.argv[1:][0]
print "input text:"
print text + "\n"
print returnedText + "\n"
if "add" in returnedText:
if calendar.setEvents(returnedText):
returnedText = returnedText.replace('for', 'of')
returnedText = re.findall("of\s.+",returnedText)[0]
print returnedText
events = calendar.getEvents(returnedText)
for event in events['items']:
dt = parse(event['start']['dateTime'])
dtf = parse(event['end']['dateTime'])
textEvent = "Event called: "+event['summary']+" at "+dt.strftime('%A')+", "+str(dt.day)+" of "+dt.strftime('%B')+" at "+dt.strftime('%H')+":"+dt.strftime('%M')+" to "+dtf.strftime('%H')+":"+dtf.strftime('%M')
print textEvent
say(textEvent)
say("Succesfully added")
print "Succesfully added"
else:
say("Incorrect input")
print "Incorrect input"
elif "delete" in returnedText:
if calendar.deleteEvents(returnedText):
say("Succesfully deteled")
print "Succesfully deteled"
else:
say("Nothing to deteled")
print "Nothing to deteled"
else:
events = calendar.getEvents(returnedText)
for event in events['items']:
dt = parse(event['start']['dateTime'])
dtf = parse(event['end']['dateTime'])
textEvent = "Event called: "+event['summary']+" at "+dt.strftime('%A')+", "+str(dt.day)+" of "+dt.strftime('%B')+" at "+dt.strftime('%H')+":"+dt.strftime('%M')+" to "+dtf.strftime('%H')+":"+dtf.strftime('%M')
print textEvent
say(textEvent)
"""
|
|
# -*- coding: utf-8 -*-
"""Testing and demonstrating program for 'lambert' of pytwobodyorbit
Created on Fri Dec 14 08:44:47 2018
@author: Shushi Uetsuki/whiskie14142
"""
import numpy as np
import tkinter
from pytwobodyorbit import TwoBodyOrbit
from pytwobodyorbit import lambert
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib
# Standard gravitational parameter for the Sun
# With this parameter, length should be in meters,
# and time should be in seconds
sunmu = 1.32712440041e20
# Create instance of TwoBodyOrbit
orbit = TwoBodyOrbit('object', mu=sunmu)
# Seconds of a day
secofday = 86400.0
# prepare plotting
matplotlib.rcParams['toolbar'] = 'none'
plt.ion() # set pyplot to the interactive mode
fig=plt.figure(figsize=(11,11))
ax=fig.gca(projection='3d', aspect='equal')
ax.set_clip_on(True)
ax.set_xlim(-3.0e11, 3.0e11)
ax.set_ylim(-3.0e11, 3.0e11)
ax.set_zlim(-3.0e11, 3.0e11)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
fig.tight_layout()
fig.canvas.set_window_title('3D Orbit')
ax.text2D(0.02, 1.00, 'Rotate: move mouse with L button held down', transform=ax.transAxes)
ax.text2D(0.02, 0.97, 'Zoom: move mouse up/down with R button held down', transform=ax.transAxes)
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(660, 40, 600, 600)
class TestLambert(tkinter.Frame):
def __init__(self, master=None):
super().__init__(master)
self.pack()
self.master = master
self.arline = None
self.arsc = None
self.object = [' P1', ' P2', ' Sun']
self.arname = [None, None, None]
self.create_widgets()
def create_widgets(self):
self.Lspace0 = tkinter.Label(self, text=' ', font=('Times', 4))
self.Lspace0.grid(row=0, column=0)
self.comment = tkinter.Text(self, width=80, height=19,
font=('Helvetica', 10), wrap=tkinter.WORD)
scom = "This program demonstrates 'lambert' function of the module 'pytwobodyorbit'.\n\n" + \
"The 'lambert' function solves so-called 'Lambert's Probrem'. It computes a two-body orbit of an object from its initial position (P1), terminal position (P2), and flight time from P1 to P2; it yields initial velocity and terminal velocity of the object.\n\n" + \
"In this program, we use the Sun as the central body. The program shows initial velocity and terminal velocity of the object. It shows classical orbital elements, and residuals of terminating position and velocity. In addition, it shows the orbit in the 3D chart.\n\n" + \
"USAGE:\nEdit coordinates of P1 and P2, and flight time, and click [Compute Prograde Orb] button for direct (prograde) orbit, or [Compute Retrograde Orb] for retrograde orbit.\n\n" + \
"UNITS:\nLength - meters\nVelocity - meters per second\nTime - days"
self.comment.insert(1.0, scom)
self.comment['state'] = tkinter.DISABLED
self.comment.grid(row=1, column=0, columnspan=3)
self.Lspace = tkinter.Label(self, text=' ', font=('Times', 4))
self.Lspace.grid(row=2, column=0)
self.L1_X = tkinter.Label(self, text='Initial Position: P1(X) ')
self.L1_X.grid(row=3, column=0, sticky=tkinter.E)
self.pos1_X = tkinter.StringVar(value=' 1.50000000000e+11')
self.Epos1_X = tkinter.Entry(self, bd=1, textvariable=self.pos1_X)
self.Epos1_X.grid(row=3, column=1, sticky=tkinter.W)
self.L1_Y = tkinter.Label(self, text='Initial Position: P1(Y) ')
self.L1_Y.grid(row=4, column=0, sticky=tkinter.E)
self.pos1_Y = tkinter.StringVar(value=' 0.00000000000e+11')
self.Epos1_Y = tkinter.Entry(self, bd=1, textvariable=self.pos1_Y)
self.Epos1_Y.grid(row=4, column=1, sticky=tkinter.W)
self.L1_Z = tkinter.Label(self, text='Initial Position: P1(Z) ')
self.L1_Z.grid(row=5, column=0, sticky=tkinter.E)
self.pos1_Z = tkinter.StringVar(value=' 0.00000000000e+11')
self.Epos1_Z = tkinter.Entry(self, bd=1, textvariable=self.pos1_Z)
self.Epos1_Z.grid(row=5, column=1, sticky=tkinter.W)
self.Lspace2 = tkinter.Label(self, text=' ', font=('Times', 4))
self.Lspace2.grid(row=6, column=0)
self.L2_X = tkinter.Label(self, text='Terminal Position: P2(X) ')
self.L2_X.grid(row=7, column=0, sticky=tkinter.E)
self.pos2_X = tkinter.StringVar(value='-0.50000000000e+11')
self.Epos2_X = tkinter.Entry(self, bd=1, textvariable=self.pos2_X)
self.Epos2_X.grid(row=7, column=1, sticky=tkinter.W)
self.L2_Y = tkinter.Label(self, text='Terminal Position: P2(Y) ')
self.L2_Y.grid(row=8, column=0, sticky=tkinter.E)
self.pos2_Y = tkinter.StringVar(value=' 1.30000000000e+11')
self.Epos2_Y = tkinter.Entry(self, bd=1, textvariable=self.pos2_Y)
self.Epos2_Y.grid(row=8, column=1, sticky=tkinter.W)
self.L2_Z = tkinter.Label(self, text='Terminal Position: P2(Z) ')
self.L2_Z.grid(row=9, column=0, sticky=tkinter.E)
self.pos2_Z = tkinter.StringVar(value=' 0.40000000000e+11')
self.Epos2_Z = tkinter.Entry(self, bd=1, textvariable=self.pos2_Z)
self.Epos2_Z.grid(row=9, column=1, sticky=tkinter.W)
self.Lspace3 = tkinter.Label(self, text=' ', font=('Times', 4))
self.Lspace3.grid(row=10, column=0)
self.Ltime = tkinter.Label(self, text='Flight Time (days) ')
self.Ltime.grid(row=11, column=0, sticky=tkinter.E)
self.ftime = tkinter.StringVar(value=' 100.0')
self.Eftime = tkinter.Entry(self, bd=1, textvariable=self.ftime)
self.Eftime.grid(row=11, column=1, sticky=tkinter.W)
self.Lspace4 = tkinter.Label(self, text=' ', font=('Times', 4))
self.Lspace4.grid(row=12, column=0)
self.solve_Lam_p = tkinter.Button(self)
self.solve_Lam_p['text'] = ' Compute Prograde Orb '
self.solve_Lam_p['command'] = self.prograde
self.solve_Lam_p.grid(row=13, column=1, sticky=tkinter.W)
self.solve_Lam_r = tkinter.Button(self)
self.solve_Lam_r['text'] = ' Compute Retrograde Orb '
self.solve_Lam_r['command'] = self.retrograde
self.solve_Lam_r.grid(row=13, column=2, sticky=tkinter.W)
self.Lspace5 = tkinter.Label(self, text=' ', font=('Arial',9,'bold'))
self.Lspace5.grid(row=14, column=0, columnspan=3)
self.Lspace6 = tkinter.Label(self, text=' ', font=('Times', 4))
self.Lspace6.grid(row=29, column=0)
self.quitapp = tkinter.Button(self)
self.quitapp['text'] = ' Quit '
self.quitapp['command'] = self.master.destroy
self.quitapp.grid(row=30, column=2, sticky=tkinter.E)
def prograde(self):
self.compute(prog=True)
def retrograde(self):
self.compute(prog=False)
def compute(self, prog=True):
# Clicking [Compute Prograde Orb] or [Compute Retrograde Orb] runs this method
# Get initial position
pos1 = np.array([float(self.pos1_X.get()), float(self.pos1_Y.get()),
float(self.pos1_Z.get())])
# Get terminal position
pos2 = np.array([float(self.pos2_X.get()), float(self.pos2_Y.get()),
float(self.pos2_Z.get())])
ps = np.array([pos1, pos2, np.zeros(3)]).T
if self.arsc is not None:
self.arsc.remove()
self.arsc = None
for j in range(3):
self.arname[j].remove()
self.arsc = ax.scatter(ps[0], ps[1], ps[2], marker='+', color='b')
for j in range(3):
self.arname[j] = ax.text(ps[0, j], ps[1, j], ps[2, j],
self.object[j], color='b', fontsize=9)
if self.arline is not None:
self.arline[0].remove()
self.arline = None
# Get flight time (days) and convert into seconds
duration = float(self.ftime.get()) * secofday
self.Lspace5['text'] = ' '
try:
# Compute initial and terminal velocity with the 'lambert' function.
# The 'prog' argument selects a prograde (True) or retrograde (False) transfer.
ivel, tvel = lambert(pos1, pos2, duration, sunmu, prog)
except ValueError as ve:
self.Lspace5['text'] = ve.args[0]
return
sivel = 'Initial Velocity (meters per second) = ' + str(ivel)
self.Livel = tkinter.Label(self, text=sivel, width=80, anchor=tkinter.W)
self.Livel.grid(row=15, column=0, columnspan=3, sticky=tkinter.W)
stvel = 'Terminal Velocity (meters per second) = ' + str(tvel)
self.Ltvel = tkinter.Label(self, text=stvel, width=80, anchor=tkinter.W)
self.Ltvel.grid(row=16, column=0, columnspan=3, sticky=tkinter.W)
# Define orbit from epoch, initial position, and initial velocity
orbit.setOrbCart(0.0, pos1, ivel)
# Get Classical orbital elements and show them
# Convert unit of time to seconds
kepl = orbit.elmKepl()
skepl = 'Classical Orbital Elements' + \
'\n epoch = ' + '{:.6f}'.format(kepl['epoch'] / secofday) + \
'\n a = ' + '{:.11e}'.format(kepl['a']) + \
'\n e = ' + '{:.11f}'.format(kepl['e']) + \
'\n i = ' + '{:12.9f}'.format(kepl['i']) + \
'\n LoAN = ' + '{:12.9f}'.format(kepl['LoAN']) + \
'\n AoP = ' + '{:12.9f}'.format(kepl['AoP']) + \
'\n TA = ' + '{:12.9f}'.format(kepl['TA']) + \
'\n T = ' + '{:12.9f}'.format(kepl['T'] / secofday)
if kepl['MA'] is None:
skepl = skepl + '\n MA = None'
else:
skepl = skepl + '\n MA = ' + '{:12.9f}'.format(kepl['MA'])
if kepl['n'] is None:
skepl = skepl + '\n n = None'
else:
skepl = skepl + '\n n = ' + '{:12.9f}'.format(kepl['n'] * secofday)
if kepl['P'] is None:
skepl = skepl + '\n P = None'
else:
skepl = skepl + '\n P = ' + '{:12.9f}'.format(kepl['P'] / secofday)
skepl = skepl + ' '
self.Lkepl = tkinter.Label(self, text=skepl, justify=tkinter.LEFT, anchor=tkinter.NW, height=12)
self.Lkepl.grid(row=17, column=0, columnspan=3, sticky=tkinter.W)
# Get points on orbit
x, y, z, t = orbit.points(1001)
# Plot an orbital line
self.arline = ax.plot(x, y, z, color='r', lw=0.75)
plt.draw()
# Get predicted position and velocity at the end of the flight
predpos, predvel = orbit.posvelatt(duration)
# Compute residuals of positions and velocities at the terminating point
sdpos = 'Residuals in position (meters) = ' + str(pos2 - predpos)
self.Ldpos = tkinter.Label(self, text=sdpos, width=80, anchor=tkinter.W)
self.Ldpos.grid(row=19, column=0, columnspan=3, sticky=tkinter.W)
sdvel = 'Residuals in velocity (meters per second) = ' + str(tvel - predvel)
self.Ldvel = tkinter.Label(self, text=sdvel, width=80, anchor=tkinter.W)
self.Ldvel.grid(row=20, column=0, columnspan=3, sticky=tkinter.W)
if __name__ == '__main__':
mw = tkinter.Tk()
mw.title("Demonstrate 'lambert' function of pytwobodyorbit")
mw.geometry('620x880+10+10')
app = TestLambert(master=mw)
app.mainloop()
|
|
"""
Test for Nest climate platform for the Smart Device Management API.
These tests fake out the subscriber/devicemanager, and are not using a real
pubsub subscriber.
"""
from google_nest_sdm.device import Device
from google_nest_sdm.event import EventMessage
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODES,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_OFF,
FAN_OFF,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_ECO,
PRESET_NONE,
)
from homeassistant.const import ATTR_TEMPERATURE
from .common import async_setup_sdm_platform
from tests.components.climate import common
PLATFORM = "climate"
async def setup_climate(hass, raw_traits=None, auth=None):
"""Load Nest climate devices."""
devices = None
if raw_traits:
traits = raw_traits
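# Give every simulated device an Info trait so it is named "My Thermostat" (entity id climate.my_thermostat)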
traits["sdm.devices.traits.Info"] = {"customName": "My Thermostat"}
devices = {
"some-device-id": Device.MakeDevice(
{
"name": "some-device-id",
"type": "sdm.devices.types.Thermostat",
"traits": traits,
},
auth=auth,
),
}
return await async_setup_sdm_platform(hass, PLATFORM, devices)
async def test_no_devices(hass):
"""Test no devices returned by the api."""
await setup_climate(hass)
assert len(hass.states.async_all()) == 0
async def test_climate_devices(hass):
"""Test no eligible climate devices returned by the api."""
await setup_climate(hass, {"sdm.devices.traits.CameraImage": {}})
assert len(hass.states.async_all()) == 0
async def test_thermostat_off(hass):
"""Test a thermostat that is not running."""
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 16.2,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_TEMPERATURE] is None
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None
assert ATTR_PRESET_MODE not in thermostat.attributes
assert ATTR_PRESET_MODES not in thermostat.attributes
assert ATTR_FAN_MODE not in thermostat.attributes
assert ATTR_FAN_MODES not in thermostat.attributes
async def test_thermostat_heat(hass):
"""Test a thermostat that is heating."""
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {
"status": "HEATING",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEAT",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 16.2,
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 22.0,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_TEMPERATURE] == 22.0
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None
assert ATTR_PRESET_MODE not in thermostat.attributes
assert ATTR_PRESET_MODES not in thermostat.attributes
async def test_thermostat_cool(hass):
"""Test a thermostat that is cooling."""
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {
"status": "COOLING",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "COOL",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 29.9,
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"coolCelsius": 28.0,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_COOL
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 29.9
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_TEMPERATURE] == 28.0
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None
assert ATTR_PRESET_MODE not in thermostat.attributes
assert ATTR_PRESET_MODES not in thermostat.attributes
async def test_thermostat_heatcool(hass):
"""Test a thermostat that is cooling in heatcool mode."""
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {
"status": "COOLING",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEATCOOL",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 29.9,
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 22.0,
"coolCelsius": 28.0,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT_COOL
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 29.9
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] == 22.0
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] == 28.0
assert thermostat.attributes[ATTR_TEMPERATURE] is None
assert ATTR_PRESET_MODE not in thermostat.attributes
assert ATTR_PRESET_MODES not in thermostat.attributes
async def test_thermostat_eco_off(hass):
"""Test a thermostat cooling with eco off."""
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {
"status": "COOLING",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEATCOOL",
},
"sdm.devices.traits.ThermostatEco": {
"availableModes": ["MANUAL_ECO", "OFF"],
"mode": "OFF",
"heatCelsius": 20.0,
"coolCelsius": 22.0,
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 29.9,
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 22.0,
"coolCelsius": 28.0,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT_COOL
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 29.9
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_AUTO,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] == 22.0
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] == 28.0
assert thermostat.attributes[ATTR_TEMPERATURE] is None
assert thermostat.attributes[ATTR_PRESET_MODE] == PRESET_NONE
assert thermostat.attributes[ATTR_PRESET_MODES] == [PRESET_ECO, PRESET_NONE]
async def test_thermostat_eco_on(hass):
"""Test a thermostat in eco mode."""
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {
"status": "COOLING",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEATCOOL",
},
"sdm.devices.traits.ThermostatEco": {
"availableModes": ["MANUAL_ECO", "OFF"],
"mode": "MANUAL_ECO",
"heatCelsius": 21.0,
"coolCelsius": 29.0,
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 29.9,
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 22.0,
"coolCelsius": 28.0,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_AUTO
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 29.9
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_AUTO,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] == 21.0
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] == 29.0
assert thermostat.attributes[ATTR_TEMPERATURE] is None
assert thermostat.attributes[ATTR_PRESET_MODE] == PRESET_ECO
assert thermostat.attributes[ATTR_PRESET_MODES] == [PRESET_ECO, PRESET_NONE]
class FakeAuth:
"""A fake implementation of the auth class that records requests."""
def __init__(self):
"""Initialize FakeAuth."""
self.method = None
self.url = None
self.json = None
async def request(self, method, url, json):
"""Capure the request arguments for tests to assert on."""
self.method = method
self.url = url
self.json = json
async def test_thermostat_set_hvac_mode(hass):
"""Test a thermostat changing hvac modes."""
auth = FakeAuth()
subscriber = await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
await common.async_set_hvac_mode(hass, HVAC_MODE_HEAT)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.ThermostatMode.SetMode",
"params": {"mode": "HEAT"},
}
# Local state does not reflect the update
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
# Simulate pubsub message when mode changes
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": "some-device-id",
"traits": {
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEAT",
},
},
},
},
auth=None,
)
subscriber.receive_event(event)
await hass.async_block_till_done() # Process dispatch/update signal
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
# Simulate pubsub message when the thermostat starts heating
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": "some-device-id",
"traits": {
"sdm.devices.traits.ThermostatHvac": {
"status": "HEATING",
},
},
},
},
auth=None,
)
subscriber.receive_event(event)
await hass.async_block_till_done() # Process dispatch/update signal
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT
async def test_thermostat_set_eco_preset(hass):
"""Test a thermostat put into eco mode."""
auth = FakeAuth()
subscriber = await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatEco": {
"availableModes": ["MANUAL_ECO", "OFF"],
"mode": "OFF",
"heatCelsius": 15.0,
"coolCelsius": 28.0,
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_PRESET_MODE] == PRESET_NONE
# Turn on eco mode
await common.async_set_preset_mode(hass, PRESET_ECO)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.ThermostatEco.SetMode",
"params": {"mode": "MANUAL_ECO"},
}
# Local state does not reflect the update
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_PRESET_MODE] == PRESET_NONE
# Simulate pubsub message when mode changes
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": "some-device-id",
"traits": {
"sdm.devices.traits.ThermostatEco": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "MANUAL_ECO",
"heatCelsius": 15.0,
"coolCelsius": 28.0,
},
},
},
},
auth=auth,
)
subscriber.receive_event(event)
await hass.async_block_till_done() # Process dispatch/update signal
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_AUTO
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_PRESET_MODE] == PRESET_ECO
# Turn off eco mode
await common.async_set_preset_mode(hass, PRESET_NONE)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.ThermostatEco.SetMode",
"params": {"mode": "OFF"},
}
async def test_thermostat_set_cool(hass):
"""Test a thermostat in cool mode with a temperature change."""
auth = FakeAuth()
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "COOL",
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"coolCelsius": 25.0,
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_COOL
await common.async_set_temperature(hass, temperature=24.0)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.ThermostatTemperatureSetpoint.SetCool",
"params": {"coolCelsius": 24.0},
}
async def test_thermostat_set_heat(hass):
"""Test a thermostat heating mode with a temperature change."""
auth = FakeAuth()
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEAT",
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 19.0,
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT
await common.async_set_temperature(hass, temperature=20.0)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.ThermostatTemperatureSetpoint.SetHeat",
"params": {"heatCelsius": 20.0},
}
async def test_thermostat_set_heat_cool(hass):
"""Test a thermostat in heatcool mode with a temperature change."""
auth = FakeAuth()
await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEATCOOL",
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 19.0,
"coolCelsius": 25.0,
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT_COOL
await common.async_set_temperature(
hass, target_temp_low=20.0, target_temp_high=24.0
)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.ThermostatTemperatureSetpoint.SetRange",
"params": {"heatCelsius": 20.0, "coolCelsius": 24.0},
}
async def test_thermostat_fan_off(hass):
"""Test a thermostat with the fan not running."""
await setup_climate(
hass,
{
"sdm.devices.traits.Fan": {
"timerMode": "OFF",
"timerTimeout": "2019-05-10T03:22:54Z",
},
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 16.2,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_FAN_MODE] == FAN_OFF
assert thermostat.attributes[ATTR_FAN_MODES] == [FAN_ON, FAN_OFF]
async def test_thermostat_fan_on(hass):
"""Test a thermostat with the fan running."""
await setup_climate(
hass,
{
"sdm.devices.traits.Fan": {
"timerMode": "ON",
"timerTimeout": "2019-05-10T03:22:54Z",
},
"sdm.devices.traits.ThermostatHvac": {
"status": "OFF",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 16.2,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert thermostat.attributes[ATTR_FAN_MODE] == FAN_ON
assert thermostat.attributes[ATTR_FAN_MODES] == [FAN_ON, FAN_OFF]
async def test_thermostat_set_fan(hass):
"""Test a thermostat enabling the fan."""
auth = FakeAuth()
await setup_climate(
hass,
{
"sdm.devices.traits.Fan": {
"timerMode": "ON",
"timerTimeout": "2019-05-10T03:22:54Z",
},
"sdm.devices.traits.ThermostatHvac": {
"status": "OFF",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_FAN_MODE] == FAN_ON
assert thermostat.attributes[ATTR_FAN_MODES] == [FAN_ON, FAN_OFF]
# Turn off fan mode
await common.async_set_fan_mode(hass, FAN_OFF)
await hass.async_block_till_done()
assert auth.method == "post"
assert auth.url == "some-device-id:executeCommand"
assert auth.json == {
"command": "sdm.devices.commands.Fan.SetTimer",
"params": {"timerMode": "OFF"},
}
async def test_thermostat_fan_empty(hass):
"""Test a fan trait with an empty response."""
await setup_climate(
hass,
{
"sdm.devices.traits.Fan": {},
"sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "OFF",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 16.2,
},
},
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_OFF
assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] == 16.2
assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
}
assert ATTR_FAN_MODE not in thermostat.attributes
assert ATTR_FAN_MODES not in thermostat.attributes
async def test_thermostat_target_temp(hass):
"""Test a thermostat changing hvac modes and affected on target temps."""
auth = FakeAuth()
subscriber = await setup_climate(
hass,
{
"sdm.devices.traits.ThermostatHvac": {
"status": "HEATING",
},
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEAT",
},
"sdm.devices.traits.Temperature": {
"ambientTemperatureCelsius": 20.1,
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 23.0,
},
},
auth=auth,
)
assert len(hass.states.async_all()) == 1
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT
assert thermostat.attributes[ATTR_TEMPERATURE] == 23.0
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None
# Simulate pubsub message changing modes
event = EventMessage(
{
"eventId": "some-event-id",
"timestamp": "2019-01-01T00:00:01Z",
"resourceUpdate": {
"name": "some-device-id",
"traits": {
"sdm.devices.traits.ThermostatMode": {
"availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
"mode": "HEATCOOL",
},
"sdm.devices.traits.ThermostatTemperatureSetpoint": {
"heatCelsius": 22.0,
"coolCelsius": 28.0,
},
},
},
},
auth=None,
)
subscriber.receive_event(event)
await hass.async_block_till_done() # Process dispatch/update signal
thermostat = hass.states.get("climate.my_thermostat")
assert thermostat is not None
assert thermostat.state == HVAC_MODE_HEAT_COOL
assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] == 22.0
assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] == 28.0
assert thermostat.attributes[ATTR_TEMPERATURE] is None
|
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo.config import cfg
from cloudbaseinit import exception
from cloudbaseinit.tests import testutils
CONF = cfg.CONF
class TestWindowsConfigDriveManager(unittest.TestCase):
def setUp(self):
self._ctypes_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict('sys.modules',
{'ctypes': self._ctypes_mock})
self._module_patcher.start()
self.windows = importlib.import_module(
"cloudbaseinit.metadata.services.osconfigdrive.windows")
self.physical_disk = importlib.import_module(
"cloudbaseinit.utils.windows.physical_disk")
self.physical_disk.Win32_DiskGeometry = mock.MagicMock()
self.windows.physical_disk.PhysicalDisk = mock.MagicMock()
self._config_manager = self.windows.WindowsConfigDriveManager()
def tearDown(self):
self._module_patcher.stop()
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('os.path.exists')
def _test_get_config_drive_cdrom_mount_point(self, mock_exists,
mock_get_os_utils, exists):
mock_osutils = mock.MagicMock()
mock_get_os_utils.return_value = mock_osutils
mock_osutils.get_cdrom_drives.return_value = ['fake drive']
mock_osutils.get_volume_label.return_value = 'config-2'
mock_exists.return_value = exists
response = self._config_manager._get_config_drive_cdrom_mount_point()
mock_osutils.get_cdrom_drives.assert_called_once_with()
mock_osutils.get_volume_label.assert_called_once_with('fake drive')
if exists:
self.assertEqual('fake drive', response)
else:
self.assertIsNone(response)
def test_get_config_drive_cdrom_mount_point_exists_true(self):
self._test_get_config_drive_cdrom_mount_point(exists=True)
def test_get_config_drive_cdrom_mount_point_exists_false(self):
self._test_get_config_drive_cdrom_mount_point(exists=False)
def test_c_char_array_to_c_ushort(self):
mock_buf = mock.MagicMock()
contents = self._ctypes_mock.cast.return_value.contents
response = self._config_manager._c_char_array_to_c_ushort(mock_buf, 1)
self.assertEqual(2, self._ctypes_mock.cast.call_count)
self._ctypes_mock.POINTER.assert_called_with(
self._ctypes_mock.wintypes.WORD)
self._ctypes_mock.cast.assert_called_with(
mock_buf.__getitem__(), self._ctypes_mock.POINTER.return_value)
self.assertEqual(contents.value.__lshift__().__add__(), response)
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._c_char_array_to_c_ushort')
def _test_get_iso_disk_size(self, mock_c_char_array_to_c_ushort,
media_type, value, iso_id):
if media_type == "fixed":
media_type = self.physical_disk.Win32_DiskGeometry.FixedMedia
boot_record_off = 0x8000
volume_size_off = 80
block_size_off = 128
mock_phys_disk = mock.MagicMock()
mock_buff = mock.MagicMock()
mock_geom = mock.MagicMock()
mock_phys_disk.get_geometry.return_value = mock_geom
mock_geom.MediaType = media_type
mock_geom.Cylinders = value
mock_geom.TracksPerCylinder = 2
mock_geom.SectorsPerTrack = 2
mock_geom.BytesPerSector = 2
mock_phys_disk.read.return_value = (mock_buff, 'fake value')
mock_buff.__getitem__.return_value = iso_id
mock_c_char_array_to_c_ushort.return_value = 100
disk_size = mock_geom.Cylinders * mock_geom.TracksPerCylinder * \
mock_geom.SectorsPerTrack * mock_geom.BytesPerSector
offset = boot_record_off / mock_geom.BytesPerSector * \
mock_geom.BytesPerSector
buf_off_volume = boot_record_off - offset + volume_size_off
buf_off_block = boot_record_off - offset + block_size_off
response = self._config_manager._get_iso_disk_size(mock_phys_disk)
mock_phys_disk.get_geometry.assert_called_once_with()
if media_type != self.physical_disk.Win32_DiskGeometry.FixedMedia:
self.assertIsNone(response)
elif disk_size <= offset + mock_geom.BytesPerSector:
self.assertIsNone(response)
else:
mock_phys_disk.seek.assert_called_once_with(offset)
mock_phys_disk.read.assert_called_once_with(
mock_geom.BytesPerSector)
if iso_id != 'CD001':
self.assertIsNone(response)
else:
mock_c_char_array_to_c_ushort.assert_has_calls([
    mock.call(mock_buff, buf_off_volume),
    mock.call(mock_buff, buf_off_block)])
self.assertEqual(10000, response)
def test_test_get_iso_disk_size(self):
self._test_get_iso_disk_size(
media_type="fixed",
value=100, iso_id='CD001')
def test_test_get_iso_disk_size_other_media_type(self):
self._test_get_iso_disk_size(media_type="other", value=100,
iso_id='CD001')
def test_test_get_iso_disk_size_other_disk_size_too_small(self):
self._test_get_iso_disk_size(
media_type="fixed",
value=0, iso_id='CD001')
def test_test_get_iso_disk_size_other_id(self):
self._test_get_iso_disk_size(
media_type="fixed",
value=100, iso_id='other id')
def test_write_iso_file(self):
mock_buff = mock.MagicMock()
mock_geom = mock.MagicMock()
mock_geom.BytesPerSector = 2
mock_phys_disk = mock.MagicMock()
mock_phys_disk.read.return_value = (mock_buff, 10)
fake_path = os.path.join('fake', 'path')
mock_phys_disk.get_geometry.return_value = mock_geom
with mock.patch('six.moves.builtins.open', mock.mock_open(),
create=True) as f:
self._config_manager._write_iso_file(mock_phys_disk, fake_path,
10)
f().write.assert_called_once_with(mock_buff)
mock_phys_disk.seek.assert_called_once_with(0)
mock_phys_disk.read.assert_called_once_with(10)
@mock.patch('os.makedirs')
def _test_extract_iso_files(self, mock_makedirs, exit_code):
fake_path = os.path.join('fake', 'path')
fake_target_path = os.path.join(fake_path, 'target')
args = [CONF.bsdtar_path, '-xf', fake_path, '-C', fake_target_path]
mock_os_utils = mock.MagicMock()
mock_os_utils.execute_process.return_value = ('fake out', 'fake err',
exit_code)
if exit_code:
self.assertRaises(exception.CloudbaseInitException,
self._config_manager._extract_iso_files,
mock_os_utils, fake_path, fake_target_path)
else:
self._config_manager._extract_iso_files(mock_os_utils, fake_path,
fake_target_path)
mock_os_utils.execute_process.assert_called_once_with(args, False)
mock_makedirs.assert_called_once_with(fake_target_path)
def test_extract_iso_files(self):
self._test_extract_iso_files(exit_code=None)
def test_extract_iso_files_exception(self):
self._test_extract_iso_files(exit_code=1)
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._get_iso_disk_size')
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._write_iso_file')
def _test_extract_iso_disk_file(self, mock_write_iso_file,
mock_get_iso_disk_size, exception):
mock_osutils = mock.MagicMock()
fake_path = os.path.join('fake', 'path')
fake_path_physical = os.path.join(fake_path, 'physical')
mock_osutils.get_physical_disks.return_value = [fake_path_physical]
mock_get_iso_disk_size.return_value = 'fake iso size'
mock_PhysDisk = self.windows.physical_disk.PhysicalDisk.return_value
if exception:
mock_PhysDisk.open.side_effect = [Exception]
response = self._config_manager._extract_iso_disk_file(
osutils=mock_osutils, iso_file_path=fake_path)
if not exception:
mock_get_iso_disk_size.assert_called_once_with(
mock_PhysDisk)
mock_write_iso_file.assert_called_once_with(
mock_PhysDisk, fake_path, 'fake iso size')
self.windows.physical_disk.PhysicalDisk.assert_called_once_with(
fake_path_physical)
mock_osutils.get_physical_disks.assert_called_once_with()
mock_PhysDisk.open.assert_called_once_with()
mock_PhysDisk.close.assert_called_once_with()
self.assertTrue(response)
else:
self.assertFalse(response)
def test_extract_iso_disk_file_disk_found(self):
self._test_extract_iso_disk_file(exception=False)
def test_extract_iso_disk_file_disk_not_found(self):
self._test_extract_iso_disk_file(exception=True)
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._get_conf_drive_from_raw_hdd')
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._get_conf_drive_from_cdrom_drive')
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._get_conf_drive_from_vfat')
def _test_get_config_drive_files(self,
mock_get_conf_drive_from_vfat,
mock_get_conf_drive_from_cdrom_drive,
mock_get_conf_drive_from_raw_hdd,
raw_hdd_found=False,
cdrom_drive_found=False,
vfat_found=False):
fake_path = os.path.join('fake', 'path')
mock_get_conf_drive_from_raw_hdd.return_value = raw_hdd_found
mock_get_conf_drive_from_cdrom_drive.return_value = cdrom_drive_found
mock_get_conf_drive_from_vfat.return_value = vfat_found
response = self._config_manager.get_config_drive_files(
target_path=fake_path)
if vfat_found:
mock_get_conf_drive_from_vfat.assert_called_once_with(fake_path)
self.assertFalse(mock_get_conf_drive_from_raw_hdd.called)
self.assertFalse(mock_get_conf_drive_from_cdrom_drive.called)
elif cdrom_drive_found:
mock_get_conf_drive_from_vfat.assert_called_once_with(fake_path)
mock_get_conf_drive_from_cdrom_drive.assert_called_once_with(
fake_path)
mock_get_conf_drive_from_raw_hdd.assert_called_once_with(
fake_path)
elif raw_hdd_found:
mock_get_conf_drive_from_vfat.assert_called_once_with(fake_path)
mock_get_conf_drive_from_raw_hdd.assert_called_once_with(
fake_path)
self.assertFalse(mock_get_conf_drive_from_cdrom_drive.called)
self.assertTrue(response)
def test_get_config_drive_files(self):
self._test_get_config_drive_files(raw_hdd_found=True)
self._test_get_config_drive_files(cdrom_drive_found=True)
self._test_get_config_drive_files(vfat_found=True)
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager.'
'_get_config_drive_cdrom_mount_point')
@mock.patch('shutil.copytree')
def _test_get_conf_drive_from_cdrom_drive(self, mock_copytree,
mock_get_config_cdrom_mount,
mount_point):
fake_path = os.path.join('fake', 'path')
mock_get_config_cdrom_mount.return_value = mount_point
response = self._config_manager._get_conf_drive_from_cdrom_drive(
fake_path)
mock_get_config_cdrom_mount.assert_called_once_with()
if mount_point:
mock_copytree.assert_called_once_with(mount_point, fake_path)
self.assertTrue(response)
else:
self.assertFalse(response)
def test_get_conf_drive_from_cdrom_drive_with_mountpoint(self):
self._test_get_conf_drive_from_cdrom_drive(
mount_point='fake mount point')
def test_get_conf_drive_from_cdrom_drive_without_mountpoint(self):
self._test_get_conf_drive_from_cdrom_drive(
mount_point=None)
@mock.patch('os.remove')
@mock.patch('os.path.exists')
@mock.patch('tempfile.gettempdir')
@mock.patch('uuid.uuid4')
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._extract_iso_disk_file')
@mock.patch('cloudbaseinit.metadata.services.osconfigdrive.windows.'
'WindowsConfigDriveManager._extract_iso_files')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def _test_get_conf_drive_from_raw_hdd(self, mock_get_os_utils,
mock_extract_iso_files,
mock_extract_iso_disk_file,
mock_uuid4, mock_gettempdir,
mock_exists, mock_remove,
found_drive):
fake_target_path = os.path.join('fake', 'path')
fake_iso_path = os.path.join('fake_dir', 'fake_id' + '.iso')
mock_uuid4.return_value = 'fake_id'
mock_gettempdir.return_value = 'fake_dir'
mock_extract_iso_disk_file.return_value = found_drive
mock_exists.return_value = found_drive
response = self._config_manager._get_conf_drive_from_raw_hdd(
fake_target_path)
mock_get_os_utils.assert_called_once_with()
mock_gettempdir.assert_called_once_with()
mock_extract_iso_disk_file.assert_called_once_with(
mock_get_os_utils(), fake_iso_path)
if found_drive:
mock_extract_iso_files.assert_called_once_with(
mock_get_os_utils(), fake_iso_path, fake_target_path)
mock_exists.assert_called_once_with(fake_iso_path)
mock_remove.assert_called_once_with(fake_iso_path)
self.assertTrue(response)
else:
self.assertFalse(response)
def test_get_conf_drive_from_raw_hdd_found_drive(self):
self._test_get_conf_drive_from_raw_hdd(found_drive=True)
def test_get_conf_drive_from_raw_hdd_no_drive_found(self):
self._test_get_conf_drive_from_raw_hdd(found_drive=False)
@mock.patch('os.makedirs')
@mock.patch('cloudbaseinit.utils.windows.vfat.copy_from_vfat_drive')
@mock.patch('cloudbaseinit.utils.windows.vfat.is_vfat_drive')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
def test_get_conf_drive_from_vfat(self, mock_get_os_utils,
mock_is_vfat_drive,
mock_copy_from_vfat_drive,
mock_os_makedirs):
mock_osutils = mock_get_os_utils.return_value
mock_osutils.get_physical_disks.return_value = (
mock.sentinel.drive1,
mock.sentinel.drive2,
)
mock_is_vfat_drive.side_effect = (None, True)
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'osconfigdrive.windows') as snatcher:
response = self._config_manager._get_conf_drive_from_vfat(
mock.sentinel.target_path)
self.assertTrue(response)
mock_osutils.get_physical_disks.assert_called_once_with()
expected_is_vfat_calls = [
mock.call(mock_osutils, mock.sentinel.drive1),
mock.call(mock_osutils, mock.sentinel.drive2),
]
self.assertEqual(expected_is_vfat_calls, mock_is_vfat_drive.mock_calls)
mock_copy_from_vfat_drive.assert_called_once_with(
mock_osutils,
mock.sentinel.drive2,
mock.sentinel.target_path)
expected_logging = [
'Config Drive found on disk %r' % mock.sentinel.drive2,
]
self.assertEqual(expected_logging, snatcher.output)
mock_os_makedirs.assert_called_once_with(mock.sentinel.target_path)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Compares theorerical scattering curves generated from PDB structure files to
experimental x-ray and neutron scattering curves.
PDBs are converted into sphere models and the Debye equation used to compute
the theoretical curves. The original sphere models are surrounded with a
hydration layer of spheres before the creation of curves to be compared to x-ray
data.
Within the Perkins lab this replaces the do_curve script
"""
# Copyright 2014 University College London
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import sys
import os
import re
import glob
import yaml
import sct
import sct.six as six
def parse_arguments():
"""
Parse command line arguments and ensure correct combinations present
"""
parser = argparse.ArgumentParser(
description='Compare theoretical curves generated from PDB files to experimental SAS curves\n')
parser.add_argument('-i', '--input_path', nargs='?', type=str,
help='Path to the input PDB files', required=True)
parser.add_argument('-o', '--output_path', nargs='?', type=str,
default='.', help='Path in which to save output files')
parser.add_argument(
'-p',
'--parameter_file',
nargs='?',
type=str,
help='Path to a file containing input parameters',
required=True)
parser.add_argument(
'-x',
'--xray',
nargs='+',
type=str,
help='Paths to files containing experimental x-ray scattering curve',
default=None)
parser.add_argument(
'-n',
'--neutron',
nargs='+',
type=str,
help='Paths to files containing experimental neutron scattering curve',
default=None)
parser.add_argument(
'-t',
'--title',
nargs='?',
type=str,
help='Title to use for summary output file',
default='sct_output')
parser.add_argument(
'-a',
'--add_res',
nargs='?',
type=str,
default=None,
help='Path to YAML file containing mass and volume for residues not originally used by sluv/SCT')
parser.add_argument('-xu', '--xray_unit', choices=['nm', 'a'],
default='a', help='Unit for Q in input x-ray data')
parser.add_argument('-nu', '--neutron_unit', choices=['nm', 'a'],
default='a', help='Unit for Q in input neutron data')
parser.add_argument('-ou', '--output_unit', choices=['nm', 'a'],
default='a', help='Unit for Q in output data')
parser.add_argument(
'--chi2',
action='store_true',
default=False,
help='Select comparison metric to be Chi squared not R factor')
args = parser.parse_args()
if (args.neutron is None) and (args.xray is None):
print("At least one experimental curve is required for comparison (xray, neutron or both).\n")
sys.exit(1)
return args
# Human sort taken from:
# http://nedbatchelder.com/blog/200712/human_sorting.html
# Provided with no formal license but site indicates that you can do with
# it as you wish
def tryint(s):
if s.isdigit():
return int(s)
else:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [tryint(c) for c in re.split('([0-9]+)', s.lower())]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
return sorted(l, key=alphanum_key)
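# Illustrative example (not part of the original script): alphanum_key splits a
# name into text and integer chunks, so numeric parts compare numerically.
#   alphanum_key('model10.pdb') returns ['model', 10, '.pdb']
#   sort_nicely(['model10.pdb', 'model2.pdb', 'model1.pdb']) returns
#   ['model1.pdb', 'model2.pdb', 'model100.pdb'.replace('100', '10')]  # i.e. 1, 2, 10 order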
def main():
print("Running modern SCT workflow")
print("---------------------------\n")
args = parse_arguments()
if args.add_res:
res_file = open(args.add_res, 'r')
add_res = yaml.load(res_file)
for res, data in six.iteritems(add_res):
sct.seq.all_residues.append(res)
sct.seq.res_vols['perkins1986a']['residue'][res] = data['vol']
sct.seq.params['mass'][res] = data['mass']
sct.pdb.accept_resids.append(res)
# Read in parameters and check we have those we need for the workflow
needed = ['curve', 'sphere', 'rg', 'rxs1', 'rfac']
if args.xray is not None:
needed.append('hydrate')
param = sct.param.parse_parameter_file(args.parameter_file, needed)
# Create output directory and open file for summary output
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
print("> Processing experimental data")
# Read in experimental curves and calculate Rg and Rxs
# Setup output directories for theoretical curves and sphere models
# Output summary analysis of the experimental data curves
try:
neut_data, xray_data, out_paths = sct.tasks.process_expt_data(
args.neutron, args.neutron_unit, args.xray, args.xray_unit, args.output_path, args.title, param)
except Exception as e:
print(str(e))
sys.exit(1)
# Create the file for model output
summary_name = os.path.join(args.output_path, args.title + '.sum')
summary_data = open(summary_name, 'w')
# Print the header for the summary data from the sphere model analysis
# (Rxs2 is added after Rxs1 if a range is supplied in param). Layout:
#   Path to input PDBs
#   Neutron / X-ray section labels
#   Model, then [Rg_model Rg_curve Rxs1_curve Volume (Rfactor scale)] repeated
#   per neutron curve, then the same block repeated per x-ray curve
sct.tasks.write_summary_header(
args.input_path,
args.neutron,
args.xray,
param,
summary_data,
args.chi2)
# Get list of PDBs in the input directory
if args.input_path[-4:] == '.pdb':
pdb_files = [args.input_path]
else:
pdb_filter = os.path.join(args.input_path, '*.pdb')
pdb_files = glob.glob(pdb_filter)
if len(pdb_files) < 1:
print("Error: No PDB files found to analyze")
sys.exit(1)
# Sort files so that they are in human expected alpha numerical order
# This means that XXXX2.pdb will sort before XXXX100.pdb
pdb_files = sort_nicely(pdb_files)
print("> Analyzing input PDBs")
# Loop over input PDBs
for count, pdb in enumerate(pdb_files):
if (count % 20 == 0):
print("\tProcessing PDB number " + str(count))
try:
# Create sphere models, compute scattering curves and compare to
# experimental curves
# Dry models are compared to neutron data, wet to xray data.
dry_data, wet_data = sct.tasks.perform_sas_analysis_pdb(pdb,
neut_data,
xray_data,
param,
out_paths)
except IOError as e:
print("Error loading PDB file name %s: %s" % (pdb, e))
continue
pdb_basename = os.path.basename(pdb)
pdb_id = os.path.splitext(pdb_basename)[0]
# Format the modelling output data for printing
neut_summ = sct.tasks.sas_model_summary_output(dry_data, param)
xray_summ = sct.tasks.sas_model_summary_output(wet_data, param)
# Output all summary data to file
summary_data.write('{0:s}\t{1:s}{2:s}\n'.format(pdb_id,
neut_summ,
xray_summ))
summary_data.close()
print("Done")
if __name__ == "__main__":
main()
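# Example invocation (illustrative only; the script and data file names below
# are placeholders, not taken from the original documentation):
#   python compare_curves.py -i pdb_models/ -p parameters.yaml \
#       -x xray_curve.dat -n neutron_curve.dat -o results/ -t my_run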
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
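# Shape sketch (illustrative, not from the original source): if logits x has
# shape [batch, 7, vocab] and labels y has shape [batch, 9], both are padded
# along the second dimension to length max(7, 9) = 9; x is zero-padded only in
# its length dimension, y in its single trailing dimension.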
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.cast(vocab_size - 1, tf.float32)
* low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
return xentropy * weights, weights
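# Worked example of the smoothing constants above (illustrative): with
# smoothing=0.1 and vocab_size=5, confidence = 0.9 and
# low_confidence = 0.1 / 4 = 0.025, so each soft target row is
# [0.9, 0.025, 0.025, 0.025, 0.025] (summing to 1.0), with the 0.9 placed at
# the true label's index.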
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
padded_labels = tf.cast(labels, tf.int32)
return tf.cast(tf.equal(outputs, padded_labels), tf.float32), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.cast(outputs, tf.int32)
padded_labels = tf.cast(labels, tf.int32)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.cast(tf.equal(outputs, padded_labels), tf.float32)
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
padded_labels = tf.cast(labels, tf.int32)
not_correct = (tf.cast(tf.not_equal(outputs, padded_labels), tf.float32) *
weights)
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
Returns:
bleu: float32 scalar, approximate BLEU score
"""
predictions = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
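# Illustrative example (not part of the original source):
#   _get_ngrams_with_counter(['a', 'b', 'a'], max_order=2) returns
#   Counter({('a',): 2, ('b',): 1, ('a', 'b'): 1, ('b', 'a'): 1})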
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
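# Illustrative sanity check (not part of the original source): a perfect
# single-sentence match yields BLEU 1.0 -- unigram/bigram/trigram precisions
# are all 1.0, the empty 4-gram bucket is skipped in the geometric mean, and
# the brevity penalty is 1.0 because the lengths match.
#   compute_bleu([['the', 'cat', 'sat']], [['the', 'cat', 'sat']]) returns 1.0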
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
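# Illustrative example (not part of the original source): with n=2, the three
# candidate bigrams ('the','cat'), ('cat','sat'), ('sat','down') overlap the two
# reference bigrams in 2 places, so precision = 2/3, recall = 1.0 and
#   rouge_n([['the', 'cat', 'sat', 'down']], [['the', 'cat', 'sat']])
# returns roughly 0.8.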
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Dictionary keyed by (i, j) coordinates holding LCS lengths
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
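# Illustrative example (not part of the original source): for
# x = ['a', 'b', 'c', 'd'] and y = ['b', 'd'] the DP table ends with
# table[4, 2] == 2, i.e. the LCS is ['b', 'd'] of length 2, so
#   _len_lcs(['a', 'b', 'c', 'd'], ['b', 'd']) returns 2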
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
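# Worked example (illustrative, not from the original source): llcs=2 with m=3
# reference words and n=4 candidate words gives r_lcs = 2/3, p_lcs = 0.5,
# beta = p_lcs / r_lcs = 0.75, and
#   F_lcs = (1 + 0.5625) * (2/3) * 0.5 / (2/3 + 0.5625 * 0.5) ~= 0.55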
|
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
if "__PEX_UNVENDORED__" in __import__("os").environ:
from setuptools.extern import six # vendor:skip
else:
from pex.third_party.setuptools.extern import six
if "__PEX_UNVENDORED__" in __import__("os").environ:
from setuptools.extern.six.moves import http_client, urllib # vendor:skip
else:
from pex.third_party.setuptools.extern.six.moves import http_client, urllib
if "__PEX_UNVENDORED__" in __import__("os").environ:
from pkg_resources import iter_entry_points # vendor:skip
else:
from pex.third_party.pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
errors = 'surrogateescape' if six.PY3 else 'strict'
return s.encode('utf-8', errors)
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
log.warn("Upload_docs command is deprecated. Use RTD instead.")
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if isinstance(value, tuple):
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = _encode(value)
yield sep_boundary
yield _encode(title)
yield b"\n\n"
yield value
if value and value[-1:] == b'\r':
yield b'\n' # write an extra newline (lurve Macs)
@classmethod
def _build_multipart(cls, data):
"""
Build up the MIME payload for the POST data
"""
boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\n--' + boundary
end_boundary = sep_boundary + b'--'
end_items = end_boundary, b"\n",
builder = functools.partial(
cls._build_part,
sep_boundary=sep_boundary,
)
part_groups = map(builder, data.items())
parts = itertools.chain.from_iterable(part_groups)
body_items = itertools.chain(parts, end_items)
content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
return b''.join(body_items), content_type
def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = _encode(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if six.PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
body, ct = self._build_multipart(data)
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urllib.parse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = http_client.HTTPConnection(netloc)
elif schema == 'https':
conn = http_client.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = ct
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
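# --- Hedged usage sketch (not part of the original module) -------------------
# Demonstrates how _build_multipart assembles the MIME body that upload_file
# POSTs to the repository; the field names and payload below are made up.
if __name__ == '__main__':
    example_fields = {
        ':action': 'doc_upload',
        'name': 'example-project',
        'content': ('docs.zip', b'not-a-real-zip'),
    }
    example_body, example_type = upload_docs._build_multipart(example_fields)
    print(example_type)
    print('%d bytes of multipart body' % len(example_body))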
|
|
import os
import sys
import yaml
import inspect
import io
from datetime import datetime
from functools import wraps, partial
import aniso8601
from werkzeug.contrib.cache import SimpleCache
from werkzeug.local import LocalProxy, LocalStack
from jinja2 import BaseLoader, ChoiceLoader, TemplateNotFound
from flask import current_app, json, request as flask_request, _app_ctx_stack
from . import verifier, logger
from .convert import to_date, to_time, to_timedelta
from .cache import top_stream, set_stream
import collections
def find_ask():
"""
    Find our instance of Ask, navigating Locals and possible blueprints.
Note: This only supports returning a reference to the first instance
of Ask found.
"""
if hasattr(current_app, 'ask'):
return getattr(current_app, 'ask')
else:
if hasattr(current_app, 'blueprints'):
blueprints = getattr(current_app, 'blueprints')
for blueprint_name in blueprints:
if hasattr(blueprints[blueprint_name], 'ask'):
return getattr(blueprints[blueprint_name], 'ask')
def dbgdump(obj, default=None, cls=None):
if current_app.config.get('ASK_PRETTY_DEBUG_LOGS', False):
indent = 2
else:
indent = None
msg = json.dumps(obj, indent=indent, default=default, cls=cls)
logger.debug(msg)
request = LocalProxy(lambda: find_ask().request)
session = LocalProxy(lambda: find_ask().session)
version = LocalProxy(lambda: find_ask().version)
context = LocalProxy(lambda: find_ask().context)
convert_errors = LocalProxy(lambda: find_ask().convert_errors)
current_stream = LocalProxy(lambda: find_ask().current_stream)
stream_cache = LocalProxy(lambda: find_ask().stream_cache)
from . import models
_converters = {'date': to_date, 'time': to_time, 'timedelta': to_timedelta}
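# Hedged illustration (not part of the original module): the keys above are the
# shorthand names accepted by the convert= argument of @ask.intent. A
# hypothetical intent could be declared as
#     @ask.intent('ScheduleIntent', convert={'when': 'date'})
#     def schedule(when): ...
# so that the 'when' slot is passed through to_date before the view runs.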
class Ask(object):
"""The Ask object provides the central interface for interacting with the Alexa service.
Ask object maps Alexa Requests to flask view functions and handles Alexa sessions.
The constructor is passed a Flask App instance, and URL endpoint.
    The Flask instance allows the convenient API of endpoints and their view functions,
so that Alexa requests may be mapped with syntax similar to a typical Flask server.
Route provides the entry point for the skill, and must be provided if an app is given.
Keyword Arguments:
app {Flask object} -- App instance - created with Flask(__name__) (default: {None})
route {str} -- entry point to which initial Alexa Requests are forwarded (default: {None})
blueprint {Flask blueprint} -- Flask Blueprint instance to use instead of Flask App (default: {None})
stream_cache {Werkzeug BasicCache} -- BasicCache-like object for storing Audio stream data (default: {SimpleCache})
path {str} -- path to templates yaml file for VUI dialog (default: {'templates.yaml'})
"""
def __init__(self, app=None, route=None, blueprint=None, stream_cache=None, path='templates.yaml'):
self.app = app
self._route = route
self._intent_view_funcs = {}
self._intent_converts = {}
self._intent_defaults = {}
self._intent_mappings = {}
self._launch_view_func = None
self._session_ended_view_func = None
self._on_session_started_callback = None
        self._default_intent_view_func = None
        self._display_element_selected_func = None
self._player_request_view_funcs = {}
self._player_mappings = {}
self._player_converts = {}
if app is not None:
self.init_app(app, path)
elif blueprint is not None:
self.init_blueprint(blueprint, path)
if stream_cache is None:
self.stream_cache = SimpleCache()
else:
self.stream_cache = stream_cache
def init_app(self, app, path='templates.yaml'):
"""Initializes Ask app by setting configuration variables, loading templates, and maps Ask route to a flask view.
The Ask instance is given the following configuration variables by calling on Flask's configuration:
`ASK_APPLICATION_ID`:
Turn on application ID verification by setting this variable to an application ID or a
list of allowed application IDs. By default, application ID verification is disabled and a
warning is logged. This variable should be set in production to ensure
requests are being sent by the applications you specify.
Default: None
`ASK_VERIFY_REQUESTS`:
Enables or disables Alexa request verification, which ensures requests sent to your skill
            are from Amazon's Alexa service. Disabling verification is useful for mocking JSON
            requests in automated tests, but this setting should not be disabled in production.
Default: True
`ASK_VERIFY_TIMESTAMP_DEBUG`:
Turn on request timestamp verification while debugging by setting this to True.
Timestamp verification helps mitigate against replay attacks. It relies on the system clock
being synchronized with an NTP server. This setting should not be enabled in production.
Default: False
`ASK_PRETTY_DEBUG_LOGS`:
Add tabs and linebreaks to the Alexa request and response printed to the debug log.
This improves readability when printing to the console, but breaks formatting when logging to CloudWatch.
Default: False
"""
if self._route is None:
raise TypeError("route is a required argument when app is not None")
self.app = app
app.ask = self
app.add_url_rule(self._route, view_func=self._flask_view_func, methods=['POST'])
app.jinja_loader = ChoiceLoader([app.jinja_loader, YamlLoader(app, path)])
def init_blueprint(self, blueprint, path='templates.yaml'):
"""Initialize a Flask Blueprint, similar to init_app, but without the access
to the application config.
Keyword Arguments:
blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None})
path {str} -- path to templates yaml file, relative to Blueprint (Default: {'templates.yaml'})
"""
if self._route is not None:
raise TypeError("route cannot be set when using blueprints!")
# we need to tuck our reference to this Ask instance into the blueprint object and find it later!
blueprint.ask = self
# BlueprintSetupState.add_url_rule gets called underneath the covers and
# concats the rule string, so we should set to an empty string to allow
# Blueprint('blueprint_api', __name__, url_prefix="/ask") to result in
# exposing the rule at "/ask" and not "/ask/".
blueprint.add_url_rule("", view_func=self._flask_view_func, methods=['POST'])
blueprint.jinja_loader = ChoiceLoader([YamlLoader(blueprint, path)])
@property
def ask_verify_requests(self):
return current_app.config.get('ASK_VERIFY_REQUESTS', True)
@property
def ask_verify_timestamp_debug(self):
return current_app.config.get('ASK_VERIFY_TIMESTAMP_DEBUG', False)
@property
def ask_application_id(self):
return current_app.config.get('ASK_APPLICATION_ID', None)
def on_session_started(self, f):
"""Decorator to call wrapped function upon starting a session.
@ask.on_session_started
def new_session():
log.info('new session started')
        Because both launch and intent requests may begin a session, this decorator is used to call
a function regardless of how the session began.
Arguments:
f {function} -- function to be called when session is started.
"""
self._on_session_started_callback = f
def launch(self, f):
"""Decorator maps a view function as the endpoint for an Alexa LaunchRequest and starts the skill.
@ask.launch
def launched():
return question('Welcome to Foo')
The wrapped function is registered as the launch view function and renders the response
for requests to the Launch URL.
A request to the launch URL is verified with the Alexa server before the payload is
passed to the view function.
Arguments:
f {function} -- Launch view function
"""
self._launch_view_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
def session_ended(self, f):
"""Decorator routes Alexa SessionEndedRequest to the wrapped view function to end the skill.
@ask.session_ended
def session_ended():
return "{}", 200
The wrapped function is registered as the session_ended view function
and renders the response for requests to the end of the session.
Arguments:
f {function} -- session_ended view function
"""
self._session_ended_view_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
def intent(self, intent_name, mapping={}, convert={}, default={}):
"""Decorator routes an Alexa IntentRequest and provides the slot parameters to the wrapped function.
Functions decorated as an intent are registered as the view function for the Intent's URL,
and provide the backend responses to give your Skill its functionality.
@ask.intent('WeatherIntent', mapping={'city': 'City'})
def weather(city):
return statement('I predict great weather for {}'.format(city))
Arguments:
intent_name {str} -- Name of the intent request to be mapped to the decorated function
Keyword Arguments:
mapping {dict} -- Maps parameters to intent slots of a different name
default: {}
convert {dict} -- Converts slot values to data types before assignment to parameters
default: {}
            default {dict} -- Provides default values for Intent slots if the Alexa request
returns no corresponding slot, or a slot with an empty value
default: {}
"""
def decorator(f):
self._intent_view_funcs[intent_name] = f
self._intent_mappings[intent_name] = mapping
self._intent_converts[intent_name] = convert
self._intent_defaults[intent_name] = default
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
return decorator
def default_intent(self, f):
"""Decorator routes any Alexa IntentRequest that is not matched by any existing @ask.intent routing."""
self._default_intent_view_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
def display_element_selected(self, f):
"""Decorator routes Alexa Display.ElementSelected request to the wrapped view function.
@ask.display_element_selected
def eval_element():
return "", 200
The wrapped function is registered as the display_element_selected view function
and renders the response for requests.
Arguments:
f {function} -- display_element_selected view function
"""
self._display_element_selected_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f
def on_purchase_completed(self, mapping={'payload': 'payload','name':'name','status':'status','token':'token'}, convert={}, default={}):
"""Decorator routes an Connections.Response to the wrapped function.
Request is sent when Alexa completes the purchase flow.
See https://developer.amazon.com/docs/in-skill-purchase/add-isps-to-a-skill.html#handle-results
The wrapped view function may accept parameters from the Request.
In addition to locale, requestId, timestamp, and type
@ask.on_purchase_completed( mapping={'payload': 'payload','name':'name','status':'status','token':'token'})
def completed(payload, name, status, token):
logger.info(payload)
logger.info(name)
logger.info(status)
logger.info(token)
"""
def decorator(f):
self._intent_view_funcs['Connections.Response'] = f
self._intent_mappings['Connections.Response'] = mapping
self._intent_converts['Connections.Response'] = convert
self._intent_defaults['Connections.Response'] = default
@wraps(f)
def wrapper(*args, **kwargs):
self._flask_view_func(*args, **kwargs)
return f
return decorator
def on_playback_started(self, mapping={'offset': 'offsetInMilliseconds'}, convert={}, default={}):
"""Decorator routes an AudioPlayer.PlaybackStarted Request to the wrapped function.
Request sent when Alexa begins playing the audio stream previously sent in a Play directive.
This lets your skill verify that playback began successfully.
This request is also sent when Alexa resumes playback after pausing it for a voice request.
The wrapped view function may accept parameters from the AudioPlayer Request.
In addition to locale, requestId, timestamp, and type
AudioPlayer Requests include:
offsetInMilliseconds - Position in stream when request was sent.
                Not the end of the stream, often a few ms after the Play Directive offset.
                This parameter is automatically mapped to 'offset' by default.
            token - token of the stream that started playing.
@ask.on_playback_started()
def on_playback_start(token, offset):
logger.info('stream has token {}'.format(token))
logger.info('Current position within the stream is {} ms'.format(offset))
"""
def decorator(f):
self._intent_view_funcs['AudioPlayer.PlaybackStarted'] = f
self._intent_mappings['AudioPlayer.PlaybackStarted'] = mapping
self._intent_converts['AudioPlayer.PlaybackStarted'] = convert
self._intent_defaults['AudioPlayer.PlaybackStarted'] = default
@wraps(f)
def wrapper(*args, **kwargs):
self._flask_view_func(*args, **kwargs)
return f
return decorator
def on_playback_finished(self, mapping={'offset': 'offsetInMilliseconds'}, convert={}, default={}):
"""Decorator routes an AudioPlayer.PlaybackFinished Request to the wrapped function.
This type of request is sent when the stream Alexa is playing comes to an end on its own.
Note: If your skill explicitly stops the playback with the Stop directive,
Alexa sends PlaybackStopped instead of PlaybackFinished.
The wrapped view function may accept parameters from the AudioPlayer Request.
In addition to locale, requestId, timestamp, and type
AudioPlayer Requests include:
offsetInMilliseconds - Position in stream when request was sent.
                Not the end of the stream, often a few ms after the Play Directive offset.
                This parameter is automatically mapped to 'offset' by default.
            token - token of the stream that finished playing.
        AudioPlayer Requests do not include the stream URL; it must be accessed from current_stream.url
"""
def decorator(f):
self._intent_view_funcs['AudioPlayer.PlaybackFinished'] = f
self._intent_mappings['AudioPlayer.PlaybackFinished'] = mapping
self._intent_converts['AudioPlayer.PlaybackFinished'] = convert
self._intent_defaults['AudioPlayer.PlaybackFinished'] = default
@wraps(f)
def wrapper(*args, **kwargs):
self._flask_view_func(*args, **kwargs)
return f
return decorator
def on_playback_stopped(self, mapping={'offset': 'offsetInMilliseconds'}, convert={}, default={}):
"""Decorator routes an AudioPlayer.PlaybackStopped Request to the wrapped function.
Sent when Alexa stops playing an audio stream in response to one of the following:
-AudioPlayer.Stop
-AudioPlayer.Play with a playBehavior of REPLACE_ALL.
-AudioPlayer.ClearQueue with a clearBehavior of CLEAR_ALL.
This request is also sent if the user makes a voice request to Alexa,
since this temporarily pauses the playback.
In this case, the playback begins automatically once the voice interaction is complete.
Note: If playback stops because the audio stream comes to an end on its own,
Alexa sends PlaybackFinished instead of PlaybackStopped.
The wrapped view function may accept parameters from the AudioPlayer Request.
In addition to locale, requestId, timestamp, and type
AudioPlayer Requests include:
offsetInMilliseconds - Position in stream when request was sent.
                Not the end of the stream, often a few ms after the Play Directive offset.
                This parameter is automatically mapped to 'offset' by default.
            token - token of the stream that was stopped.
        AudioPlayer Requests do not include the stream URL; it must be accessed from current_stream.url
"""
def decorator(f):
self._intent_view_funcs['AudioPlayer.PlaybackStopped'] = f
self._intent_mappings['AudioPlayer.PlaybackStopped'] = mapping
self._intent_converts['AudioPlayer.PlaybackStopped'] = convert
self._intent_defaults['AudioPlayer.PlaybackStopped'] = default
@wraps(f)
def wrapper(*args, **kwargs):
self._flask_view_func(*args, **kwargs)
return f
return decorator
def on_playback_nearly_finished(self, mapping={'offset': 'offsetInMilliseconds'}, convert={}, default={}):
"""Decorator routes an AudioPlayer.PlaybackNearlyFinished Request to the wrapped function.
        This AudioPlayer Request is sent when the device is ready to receive a new stream.
To progress through a playlist, respond to this request with an enqueue or play_next audio response.
**Note** that this request is sent when Alexa is ready to receive a new stream to enqueue, and not
necessarily when the stream's offset is near the end.
The request may be sent by Alexa immediately after your skill sends a Play Directive.
The wrapped view function may accept parameters from the AudioPlayer Request.
In addition to locale, requestId, timestamp, and type
        AudioPlayer Requests include:
offsetInMilliseconds - Position in stream when request was sent.
                Not the end of the stream, often a few ms after the Play Directive offset.
This parameter is automatically mapped to 'offset' by default.
token - token of the stream that is nearly finished.
        AudioPlayer Requests do not include the stream URL; it must be accessed from current_stream.url
Example usage:
@ask.on_playback_nearly_finished()
def play_next_stream():
audio().enqueue(my_next_song)
# offsetInMilliseconds is mapped to offset by default for convenience
@ask.on_playback_nearly_finished()
def show_request_feedback(offset, token):
logging.info('Nearly Finished')
logging.info('Stream at {} ms when Playback Request sent'.format(offset))
logging.info('Stream holds the token {}'.format(token))
logging.info('Streaming from {}'.format(current_stream.url))
# example of changing the default parameter mapping
@ask.on_playback_nearly_finished(mapping={'pos': 'offsetInMilliseconds', 'stream_token': 'token'})
def show_request_feedback(pos, stream_token):
_infodump('Nearly Finished')
_infodump('Stream at {} ms when Playback Request sent'.format(pos))
_infodump('Stream holds the token {}'.format(stream_token))
"""
def decorator(f):
self._intent_view_funcs['AudioPlayer.PlaybackNearlyFinished'] = f
self._intent_mappings['AudioPlayer.PlaybackNearlyFinished'] = mapping
self._intent_converts['AudioPlayer.PlaybackNearlyFinished'] = convert
self._intent_defaults['AudioPlayer.PlaybackNearlyFinished'] = default
@wraps(f)
def wrapper(*args, **kwargs):
self._flask_view_func(*args, **kwargs)
return f
return decorator
def on_playback_failed(self, mapping={}, convert={}, default={}):
"""Decorator routes an AudioPlayer.PlaybackFailed Request to the wrapped function.
        This AudioPlayer Request is sent when Alexa encounters an error while attempting to play a stream.
The wrapped view function may accept parameters from the AudioPlayer Request.
In addition to locale, requestId, timestamp, and type
PlayBackFailed Requests include:
error - Contains error info under parameters type and message
token - represents the stream that failed to play.
currentPlaybackState - Details about the playback activity occurring at the time of the error
Contains the following parameters:
token - represents the audio stream currently playing when the error occurred.
Note that this may be different from the value of the request.token property.
offsetInMilliseconds - Position in stream when request was sent.
                    Not the end of the stream, often a few ms after the Play Directive offset.
This parameter is automatically mapped to 'offset' by default.
playerActivity - player state when the error occurred
"""
def decorator(f):
self._intent_view_funcs['AudioPlayer.PlaybackFailed'] = f
self._intent_mappings['AudioPlayer.PlaybackFailed'] = mapping
self._intent_converts['AudioPlayer.PlaybackFailed'] = convert
self._intent_defaults['AudioPlayer.PlaybackFailed'] = default
@wraps(f)
def wrapper(*args, **kwargs):
self._flask_view_func(*args, **kwargs)
return f
return decorator
@property
def request(self):
return getattr(_app_ctx_stack.top, '_ask_request', None)
@request.setter
def request(self, value):
_app_ctx_stack.top._ask_request = value
@property
def session(self):
return getattr(_app_ctx_stack.top, '_ask_session', models._Field())
@session.setter
def session(self, value):
_app_ctx_stack.top._ask_session = value
@property
def version(self):
return getattr(_app_ctx_stack.top, '_ask_version', None)
@version.setter
def version(self, value):
_app_ctx_stack.top._ask_version = value
@property
def context(self):
return getattr(_app_ctx_stack.top, '_ask_context', None)
@context.setter
def context(self, value):
_app_ctx_stack.top._ask_context = value
@property
def convert_errors(self):
return getattr(_app_ctx_stack.top, '_ask_convert_errors', None)
@convert_errors.setter
def convert_errors(self, value):
_app_ctx_stack.top._ask_convert_errors = value
@property
def current_stream(self):
#return getattr(_app_ctx_stack.top, '_ask_current_stream', models._Field())
user = self._get_user()
if user:
stream = top_stream(self.stream_cache, user)
if stream:
current = models._Field()
current.__dict__.update(stream)
return current
return models._Field()
@current_stream.setter
def current_stream(self, value):
# assumption 1 is we get a models._Field as value
# assumption 2 is if someone sets a value, it's resetting the stack
user = self._get_user()
if user:
set_stream(self.stream_cache, user, value.__dict__)
def run_aws_lambda(self, event):
"""Invoke the Flask Ask application from an AWS Lambda function handler.
Use this method to service AWS Lambda requests from a custom Alexa
skill. This method will invoke your Flask application providing a
WSGI-compatible environment that wraps the original Alexa event
provided to the AWS Lambda handler. Returns the output generated by
a Flask Ask application, which should be used as the return value
to the AWS Lambda handler function.
Example usage:
from flask import Flask
from flask_ask import Ask, statement
app = Flask(__name__)
ask = Ask(app, '/')
# This function name is what you defined when you create an
# AWS Lambda function. By default, AWS calls this function
# lambda_handler.
def lambda_handler(event, _context):
return ask.run_aws_lambda(event)
@ask.intent('HelloIntent')
def hello(firstname):
speech_text = "Hello %s" % firstname
return statement(speech_text).simple_card('Hello', speech_text)
"""
# We are guaranteed to be called by AWS as a Lambda function does not
# expose a public facing interface.
self.app.config['ASK_VERIFY_REQUESTS'] = False
# Convert an environment variable to a WSGI "bytes-as-unicode" string
enc, esc = sys.getfilesystemencoding(), 'surrogateescape'
def unicode_to_wsgi(u):
return u.encode(enc, esc).decode('iso-8859-1')
# Create a WSGI-compatible environ that can be passed to the
# application. It is loaded with the OS environment variables,
# mandatory CGI-like variables, as well as the mandatory WSGI
# variables.
environ = {k: unicode_to_wsgi(v) for k, v in os.environ.items()}
environ['REQUEST_METHOD'] = 'POST'
environ['PATH_INFO'] = '/'
environ['SERVER_NAME'] = 'AWS-Lambda'
environ['SERVER_PORT'] = '80'
environ['SERVER_PROTOCOL'] = 'HTTP/1.0'
environ['wsgi.version'] = (1, 0)
environ['wsgi.url_scheme'] = 'http'
environ['wsgi.errors'] = sys.stderr
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = False
environ['wsgi.run_once'] = True
# Convert the event provided by the AWS Lambda handler to a JSON
# string that can be read as the body of a HTTP POST request.
body = json.dumps(event)
environ['CONTENT_TYPE'] = 'application/json'
environ['CONTENT_LENGTH'] = len(body)
PY3 = sys.version_info[0] == 3
if PY3:
environ['wsgi.input'] = io.StringIO(body)
else:
environ['wsgi.input'] = io.BytesIO(body)
# Start response is a required callback that must be passed when
# the application is invoked. It is used to set HTTP status and
# headers. Read the WSGI spec for details (PEP3333).
headers = []
def start_response(status, response_headers, _exc_info=None):
headers[:] = [status, response_headers]
# Invoke the actual Flask application providing our environment,
# with our Alexa event as the body of the HTTP request, as well
# as the callback function above. The result will be an iterator
# that provides a serialized JSON string for our Alexa response.
result = self.app(environ, start_response)
try:
if not headers:
raise AssertionError("start_response() not called by WSGI app")
output = b"".join(result)
if not headers[0].startswith("2"):
raise AssertionError("Non-2xx from app: hdrs={}, body={}".format(headers, output))
# The Lambda handler expects a Python object that can be
# serialized as JSON, so we need to take the already serialized
# JSON and deserialize it.
return json.loads(output)
finally:
# Per the WSGI spec, we need to invoke the close method if it
# is implemented on the result object.
if hasattr(result, 'close'):
result.close()
def _get_user(self):
if self.context:
return self.context.get('System', {}).get('user', {}).get('userId')
return None
def _alexa_request(self, verify=True):
raw_body = flask_request.data
alexa_request_payload = json.loads(raw_body)
if verify:
cert_url = flask_request.headers['Signaturecertchainurl']
signature = flask_request.headers['Signature']
            # load certificate - this verifies the certificate url and format under the hood
cert = verifier.load_certificate(cert_url)
# verify signature
verifier.verify_signature(cert, signature, raw_body)
# verify timestamp
raw_timestamp = alexa_request_payload.get('request', {}).get('timestamp')
timestamp = self._parse_timestamp(raw_timestamp)
if not current_app.debug or self.ask_verify_timestamp_debug:
verifier.verify_timestamp(timestamp)
# verify application id
try:
application_id = alexa_request_payload['session']['application']['applicationId']
except KeyError:
application_id = alexa_request_payload['context'][
'System']['application']['applicationId']
if self.ask_application_id is not None:
verifier.verify_application_id(application_id, self.ask_application_id)
return alexa_request_payload
@staticmethod
def _parse_timestamp(timestamp):
"""
        Parse a given timestamp value, raising ValueError if None or falsey
"""
if timestamp:
try:
return aniso8601.parse_datetime(timestamp)
except AttributeError:
# raised by aniso8601 if raw_timestamp is not valid string
# in ISO8601 format
try:
return datetime.utcfromtimestamp(timestamp)
except:
# relax the timestamp a bit in case it was sent in millis
return datetime.utcfromtimestamp(timestamp/1000)
raise ValueError('Invalid timestamp value! Cannot parse from either ISO8601 string or UTC timestamp.')
def _update_stream(self):
fresh_stream = models._Field()
fresh_stream.__dict__.update(self.current_stream.__dict__) # keeps url attribute after stopping stream
fresh_stream.__dict__.update(self._from_directive())
context_info = self._from_context()
        if context_info is not None:
fresh_stream.__dict__.update(context_info)
self.current_stream = fresh_stream
dbgdump(current_stream.__dict__)
def _from_context(self):
return getattr(self.context, 'AudioPlayer', {})
def _from_directive(self):
from_buffer = top_stream(self.stream_cache, self._get_user())
if from_buffer:
if self.request.intent and 'PauseIntent' in self.request.intent.name:
return {}
return from_buffer
return {}
def _flask_view_func(self, *args, **kwargs):
ask_payload = self._alexa_request(verify=self.ask_verify_requests)
dbgdump(ask_payload)
request_body = models._Field(ask_payload)
self.request = request_body.request
self.version = request_body.version
self.context = getattr(request_body, 'context', models._Field())
self.session = getattr(request_body, 'session', self.session) # to keep old session.attributes through AudioRequests
if not self.session:
self.session = models._Field()
if not self.session.attributes:
self.session.attributes = models._Field()
self._update_stream()
# add current dialog state in session
try:
self.session["dialogState"] = request.dialogState
except KeyError:
self.session["dialogState"] = "unknown"
try:
if self.session.new and self._on_session_started_callback is not None:
self._on_session_started_callback()
except AttributeError:
pass
result = None
request_type = self.request.type
if request_type == 'LaunchRequest' and self._launch_view_func:
result = self._launch_view_func()
elif request_type == 'SessionEndedRequest':
if self._session_ended_view_func:
result = self._session_ended_view_func()
else:
result = "{}", 200
elif request_type == 'IntentRequest' and self._intent_view_funcs:
result = self._map_intent_to_view_func(self.request.intent)()
elif request_type == 'Display.ElementSelected' and self._display_element_selected_func:
result = self._display_element_selected_func()
elif 'AudioPlayer' in request_type:
result = self._map_player_request_to_func(self.request.type)()
# routes to on_playback funcs
# user can also access state of content.AudioPlayer with current_stream
elif 'Connections.Response' in request_type:
result = self._map_purchase_request_to_func(self.request.type)()
if result is not None:
if isinstance(result, models._Response):
return result.render_response()
return result
return "", 400
def _map_intent_to_view_func(self, intent):
"""Provides appropiate parameters to the intent functions."""
if intent.name in self._intent_view_funcs:
view_func = self._intent_view_funcs[intent.name]
elif self._default_intent_view_func is not None:
view_func = self._default_intent_view_func
else:
raise NotImplementedError('Intent "{}" not found and no default intent specified.'.format(intent.name))
PY3 = sys.version_info[0] == 3
if PY3:
argspec = inspect.getfullargspec(view_func)
else:
argspec = inspect.getargspec(view_func)
arg_names = argspec.args
arg_values = self._map_params_to_view_args(intent.name, arg_names)
return partial(view_func, *arg_values)
def _map_player_request_to_func(self, player_request_type):
"""Provides appropriate parameters to the on_playback functions."""
        # callbacks for on_playback requests are optional
view_func = self._intent_view_funcs.get(player_request_type, lambda: None)
        if sys.version_info[0] == 3:
            argspec = inspect.getfullargspec(view_func)
        else:
            argspec = inspect.getargspec(view_func)
arg_names = argspec.args
arg_values = self._map_params_to_view_args(player_request_type, arg_names)
return partial(view_func, *arg_values)
def _map_purchase_request_to_func(self, purchase_request_type):
"""Provides appropriate parameters to the on_purchase functions."""
if purchase_request_type in self._intent_view_funcs:
view_func = self._intent_view_funcs[purchase_request_type]
else:
raise NotImplementedError('Request type "{}" not found and no default view specified.'.format(purchase_request_type))
        if sys.version_info[0] == 3:
            argspec = inspect.getfullargspec(view_func)
        else:
            argspec = inspect.getargspec(view_func)
arg_names = argspec.args
arg_values = self._map_params_to_view_args(purchase_request_type, arg_names)
print('_map_purchase_request_to_func', arg_names, arg_values, view_func, purchase_request_type)
return partial(view_func, *arg_values)
def _get_slot_value(self, slot_object):
slot_name = slot_object.name
slot_value = getattr(slot_object, 'value', None)
resolutions = getattr(slot_object, 'resolutions', None)
if resolutions is not None:
resolutions_per_authority = getattr(resolutions, 'resolutionsPerAuthority', None)
if resolutions_per_authority is not None and len(resolutions_per_authority) > 0:
values = resolutions_per_authority[0].get('values', None)
if values is not None and len(values) > 0:
value = values[0].get('value', None)
if value is not None:
slot_value = value.get('name', slot_value)
return slot_value
def _map_params_to_view_args(self, view_name, arg_names):
arg_values = []
convert = self._intent_converts.get(view_name)
default = self._intent_defaults.get(view_name)
mapping = self._intent_mappings.get(view_name)
convert_errors = {}
request_data = {}
intent = getattr(self.request, 'intent', None)
if intent is not None:
if intent.slots is not None:
for slot_key in intent.slots.keys():
slot_object = getattr(intent.slots, slot_key)
request_data[slot_object.name] = self._get_slot_value(slot_object=slot_object)
else:
for param_name in self.request:
request_data[param_name] = getattr(self.request, param_name, None)
for arg_name in arg_names:
param_or_slot = mapping.get(arg_name, arg_name)
arg_value = request_data.get(param_or_slot)
if arg_value is None or arg_value == "":
if arg_name in default:
default_value = default[arg_name]
if isinstance(default_value, collections.Callable):
default_value = default_value()
arg_value = default_value
elif arg_name in convert:
shorthand_or_function = convert[arg_name]
if shorthand_or_function in _converters:
shorthand = shorthand_or_function
convert_func = _converters[shorthand]
else:
convert_func = shorthand_or_function
try:
arg_value = convert_func(arg_value)
except Exception as e:
convert_errors[arg_name] = e
arg_values.append(arg_value)
self.convert_errors = convert_errors
return arg_values
class YamlLoader(BaseLoader):
def __init__(self, app, path):
self.path = app.root_path + os.path.sep + path
self.mapping = {}
self._reload_mapping()
def _reload_mapping(self):
if os.path.isfile(self.path):
self.last_mtime = os.path.getmtime(self.path)
with open(self.path) as f:
self.mapping = yaml.safe_load(f.read())
def get_source(self, environment, template):
if not os.path.isfile(self.path):
return None, None, None
if self.last_mtime != os.path.getmtime(self.path):
self._reload_mapping()
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
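# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal skill wiring, under the assumption that this package is importable
# as flask_ask and that `statement` comes from its models (as the docstrings
# above suggest). The intent, slot, and route names here are made up.
#
#     from flask import Flask
#     from flask_ask import Ask, statement
#
#     app = Flask(__name__)
#     ask = Ask(app, '/alexa')
#
#     @ask.launch
#     def launched():
#         return statement('Hello from a hypothetical skill')
#
#     @ask.intent('WeatherIntent', mapping={'city': 'City'}, default={'city': 'Berlin'})
#     def weather(city):
#         return statement('I predict great weather for {}'.format(city))
#
#     if __name__ == '__main__':
#         app.run(debug=True)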
|
|
# -*- coding: utf-8 -*-
from twisted.python.usage import Options
from xml.dom.minidom import parse as XMLParser
from xml.parsers.expat import ExpatError
from sys import exit, stderr
from re import match
def raiseMatch(re, data):
if match(re, data) is None:
raise AssertionError, data + ' did not match ' + re
return True
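# Hedged illustration (not part of the original module): raiseMatch is used by
# the parsers below as a lightweight validator, e.g.
#     raiseMatch('[_a-zA-Z]+', 'Cyclon')   # returns True
#     raiseMatch('[_a-zA-Z]+', '123')      # raises AssertionError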
class ConfigParser(Options):
__cfoverride = ' (Overrides configuration file data)'
optParameters = [
['bootstrap', 'b', None, 'Bootstrap node' + __cfoverride],
['config-file', 'c', None, 'Node configuration file'],
]
winds = [ ]
node = { }
overlay = { }
components = [ ]
def __init__(self):
Options.__init__(self)
self['winds'] = ConfigParser.winds
self['node'] = ConfigParser.node
self['overlay'] = ConfigParser.overlay
self['components'] = ConfigParser.components
def subElementsParser(self, container, elementName, requiredAttributes=[]):
errors = [ ]
cfName = str(container.nodeName)
if cfName not in self:
setattr(ConfigParser, cfName, [ ])
self[cfName] = getattr(ConfigParser, cfName)
for e in container.getElementsByTagName(elementName):
d = { }
for k in e.attributes.keys():
d[str(k)] = str(e.getAttribute(k))
for k,t,f in requiredAttributes:
try:
assert d[k] is not None, (
'Required attribute %s is missing' % (k))
if f is not None:
f(d[k])
if t is not None:
d[k] = t(d[k])
# continue expression implicit here =)
except KeyError as e:
errors.append('Required attribute %s is empty' % (k))
except AssertionError as e:
errors.append(str(e))
if len(errors) == 0:
self[cfName].append(d)
if len(errors) > 0:
raise RuntimeError, str.join('\n', errors)
def elementParser(self, container, requiredAttributes):
errors = [ ]
nodeName = str(container.nodeName)
if nodeName not in self:
setattr(ConfigParser, nodeName, { })
self[nodeName] = getattr(ConfigParser, nodeName)
for k in container.attributes.keys():
self[nodeName][str(k)] = str(container.getAttribute(k))
for k,t,f in requiredAttributes:
try:
assert self[nodeName][k] is not None, (
'Required attribute %s is missing' % (k))
if f is not None:
f(self[nodeName][k])
if t is not None:
self[nodeName][k] = t(self[nodeName][k])
except KeyError as e:
errors.append('Required attribute %s is empty' % (k))
except AssertionError as e:
errors.append(str(e))
if len(errors) > 0:
raise RuntimeError, str.join('\n', errors)
def opt_node(self, symbol):
symbol = symbol.split(',')
try:
self['node']['name'], self['node']['domain'] = symbol[0].split('@')
except Exception as e:
raise RuntimeError, 'Error parsing node options\n\t' + str(e)
for r in symbol[1:]:
t = r.split(':')
self['node'][t[0]] = str.join(':', t[1:])
opt_n = opt_node
def nodeParser(self, node):
for k in node.attributes.keys():
self['node'][str(k)] = str(node.getAttribute(k))
if 'name' not in self['node'] or 'domain' not in self['node']:
raise RuntimeError, 'Missing node information'
# address,port,kind,protocol,module,class,extra0:extraData0
def opt_winds (self, symbol):
symbol = symbol.split(',')
wind = None
try:
wind = {
'address': symbol[0],
'ports': (int(symbol[1]),),
'kind': symbol[2],
'protocol': symbol[3],
'module': symbol[4],
'class': symbol[5],
}
except IndexError:
raise RuntimeError, 'Missing argument in wind CLI tuple'
for r in symbol[6:]:
k = r.split(':')
wind[k[0]] = str.join(':', k[1:])
self['winds'].append(wind)
opt_w = opt_winds
def windsParser(self, winds):
requiredAttributes = [
('address', None, None),
('kind', None, None),
('module', None, None),
('class', None, None),
('ports', lambda s: tuple(map(int, s.split(','))), None),
('protocol', None, None),
]
self.subElementsParser(winds, 'wind', requiredAttributes)
def opt_overlay(self, symbol):
symbol = map(str, symbol.split(','))
try:
self['overlay']['module'], self['overlay']['class'] = symbol[:2]
raiseMatch('[_a-zA-Z]+(\.[_a-zA-Z]+)*', self['overlay']['module'])
raiseMatch('[_a-zA-Z]+', self['overlay']['class'])
except ValueError:
raise RuntimeError, (
'Error reading overlay configuration from CLI')
except AssertionError as e:
raise RuntimeError, str(e)
for r in symbol[2:]:
k = r.split(':')
self['overlay'][k[0]] = str.join(':', k[1:])
opt_o = opt_overlay
def overlayParser(self, overlay):
requiredAttributes = [
('module',
None, lambda s: raiseMatch('[_a-zA-Z]+(\.[_a-zA-Z]+)*', s)),
('class',
None, lambda s: raiseMatch('[_a-zA-Z]+', s)),
]
self.elementParser(overlay, requiredAttributes)
def componentsParser(self, components):
requiredAttributes = [
('name', None, None),
('class', None, None)
]
self.subElementsParser(components, 'component', requiredAttributes)
def bootstrapParser(self, bootstrap):
# TODO: validation of these fields
requiredAttributes = [
('address', None, None),
('port', lambda p: int(p), None),
('kind', None, None),
('protocol', None, None),
]
self.subElementsParser(bootstrap, 'introducer', requiredAttributes)
def postOptions(self):
cdata = None
if 'config-file' in self and self['config-file'] != None:
try:
cdata = XMLParser(self['config-file'])
except ExpatError as ee:
raise RuntimeError, (
'Error while parsing configuration file\n' + str(ee))
mustHaveFields = [{
'nm': 'winds',
'prsr': self.windsParser,
},{
'nm': 'overlay',
'prsr': self.overlayParser,
},{
'nm': 'node',
'prsr': self.nodeParser,
}]
optionalFields = [{
'nm': 'bootstrap',
'prsr': self.bootstrapParser,
'dval': [ ]
},{
'nm': 'managementComponents',
'prsr': self.componentsParser,
'dval': [ ]
}]
for f in mustHaveFields:
            if len(self[f['nm']]) == 0 and cdata is not None:
try:
f['prsr'](cdata.getElementsByTagName(f['nm'])[0])
except RuntimeError as e:
raise RuntimeError, (
'While parsing %s\n\t%s' % (f['nm'], str(e)))
            elif len(self[f['nm']]) == 0 and cdata is None:
raise RuntimeError, (
                    '%s configuration file section or CLI flag not found\n'
'Review your setup and try again.' % (f['nm']))
for f in optionalFields:
if f['nm'] not in self or self[f['nm']] is None:
self[f['nm']] = f['dval']
if cdata is None:
continue
try:
f['prsr'](cdata.getElementsByTagName(f['nm'])[0])
except IndexError as ie:
print ' > %s not in configuration file' % (f['nm'])
except Exception as e:
print >> stderr, (
                    ' * While parsing an optional parameter %s\n * %s' % (
f['nm'], str(e)))
if __name__ == '__main__':
'''CLI Test case:
python configparser.py \
-w 127.0.0.1,8001,IPv4/UDP,default,puretcp,PureTCPProto \
-w 127.0.0.1,8002,IPv4/UDP,default,pureudp,PureUDPProto \
-o cyclon,Cyclon,cacheSize:40 -n [email protected]
'''
'''Configuration file test case:
<?xml version="1.0" ?>
<ManP2P-ng subject="configuration">
<node name="Alderan" domain="redes.inf.ufrgs.br"/>
<winds>
<wind address="127.0.0.1" kind="IPv4/UDP" module="pureudp" class="PureUDPProto" ports="8001,8002" protocol="default"/>
<wind address="127.0.0.1" kind="IPv4/TCP" module="puretcp" class="PureTCPProto" ports="8001,8002" protocol="default"/>
</winds>
<overlay cacheSize="40" module="cyclon" class="Cyclon"/>
<bootstrap>
<introducer address="127.0.0.1" port="8004" kind="IPv4/UDP" protocol="default"/>
<introducer address="127.0.0.1" port="8005" kind="IPv4/TCP" protocol="default"/>
</bootstrap>
</ManP2P-ng>
'''
    # Note: parsed values are stored on class-level attributes, so every
    # ConfigParser instance sees the same data after parseOptions() runs.
    ConfigParser().parseOptions()
    print ConfigParser()['node']['name'], ConfigParser()['node']['domain']
|
|
"""
Module: Machine Learning Models
Project: Sparx
Authors: Bastin Robins. J
Email : [email protected]
"""
from datetime import datetime
from urllib import urlencode
import logging
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
import dateutil.parser as parser
from geopy.geocoders import Nominatim
class Process(object):
''' Process class consist for best micro-level preprocessing
methods helps to clean the dataset passed as dataframes
'''
def __init__(self):
self.version = "0.0.1"
@staticmethod
def geocode(address):
        ''' Return full address, latitude and longitude of a given address string
Parameters:
-----------
address: str
            Enter an address string whose latitude and longitude
should be returned
Usage:
------
        >> p = Process()
        >> p.geocode("172 5th Avenue NYC")
        >> {'latitude': 40.74111015, 'address': u'172, 5th Avenue, Flatiron,
Manhattan, Manhattan Community Board 5, New York County, NYC,
New York, 10010, United States of America',
'longitude': -73.9903105}
'''
geolocator = Nominatim()
location = geolocator.geocode(address)
return dict(address=location.address, latitude=location.latitude,\
longitude=location.longitude)
@staticmethod
def unique_value_count(df):
'''
        Return the unique value counts of each column as a nested dict
        Parameters:
        -----------
        df: pandas dataframe
            Enter the dataframe whose columns should be counted
Usage:
------
        >> p = Process()
>> p.unique_value_count(data['name'])
>> {'gender': {'Male': 2, 'Female': 6},
'age': {32: 2, 34: 2, 35: 1, 37: 1, 21: 1, 28: 1},
'name': {'Neeta': 1, 'vandana': 2, 'Amruta': 1, 'Vikrant': 2,
'vanana': 1, 'Pallavi': 1}}
'''
response = {}
for col in df.columns:
response[col] = dict(df[col].value_counts())
return response
@staticmethod
    def unique_identifier(data):
        ''' Return the list of columns whose values are all unique (candidate identifiers). '''
        unique_col = []
for col in data.columns:
if((len(data[col].unique())) == (data[col].size)):
unique_col.append(col)
return unique_col
@staticmethod
def datestring_to_dd_mm_yy(datestring):
''' Return a dictionary of year, month,day, hour, minute and second
Parameters:
-----------
datestring: str
Enter the datetime string
Usage:
------
        >> p = Process()
>> p.datestring_to_dd_mm_yy("march/1/1980")
>> {'second': '00', 'hour': '00', 'year': '1980', 'day': '01',
'minute': '00', 'month': '03'}
'''
date, time = str(parser.parse(datestring)).split(' ')
date = date.split('-')
time = time.split(':')
return dict(year=date[0], month=date[1], day=date[2],\
hour=time[0], minute=time[1], second=time[2])
def get_version(self):
''' Return a version number'''
return self.version
@staticmethod
def is_categorical(dataframe):
        ''' Return True if the given column has an object (categorical) dtype. '''
if dataframe.dtypes == 'object':
return True
else:
return False
@staticmethod
def count_missing(data):
''' Return the count of missing values
        Parameters:
----------
data: pandas.core.series
given a column in pandas dataframe
Usage:
-------
>>> p = Process()
>>> p.count_missing(df['col_name'])
>>> 0
'''
return data.isnull().sum()
@staticmethod
def dict_query_string(query_dict):
''' Return a string which is the query formed using the given dictionary
as parameter
Parameters
----------
query_dict: Dict
Dictionary of keys and values
Usage
-----
# Input query string
query = {'name': 'Sam', 'age': 20 }
p = Process()
p.dict_query_string(query)
        >> name=Sam&age=20
'''
return urlencode(query_dict)
@staticmethod
def describe(dataframe, col_name):
        ''' Return the basic description of a column in a pandas dataframe;
        check if the column is an integer or float type
Parameters:
-----------
dataframe: pandas dataframe
col_name: str
any one column name in the dataframe passed
Usage:
------
>> p = Process()
>> p.describe(dataframe, 'Amount')
>> {'min': 0, 'max': 100, 'mean': 50, 'median': 49 }
'''
try:
return dict(min=dataframe[col_name].min(), max=dataframe[col_name].max(),\
mean=dataframe[col_name].mean(), median=dataframe[col_name].median())
except Exception as e:
logging.exception(e)
@staticmethod
def encode(data):
        ''' Return a clean dataframe which is initially converted into utf8 format
        and in which all categorical variables are converted into numeric labels; each
        label encoder's class mapping is saved into a dictionary. The result is a tuple whose
        first element is the dataframe and whose second element is the hash_map.
Parameters:
------------
data : pandas dataframe
Usage:
------
>> p = Process()
>> p.encode(pd.DataFrame())
'''
# Remove all the ascii unicodes
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Instantiate the LabelEncoder instance
label = LabelEncoder()
# One shot hot encoding if its categorical variable
hash_map = {}
date_columns = []
for col in data.columns:
if data[col].dtypes == 'object':
hash_map[col] = dict(zip(label.fit_transform(data[col].unique()),\
data[col].unique()))
label.fit(data[col].values)
data[col] = label.transform(data[col])
return (data, hash_map)
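# --- Hedged usage sketch (not part of the original module) -------------------
# Exercises a few of the Process helpers on a tiny, made-up DataFrame.
if __name__ == '__main__':
    p = Process()
    sample = pd.DataFrame({'name': ['Sam', 'Neeta', 'Sam', None],
                           'age': [20, 32, 20, 28]})
    print p.unique_value_count(sample)
    print p.count_missing(sample['name'])
    print p.describe(sample, 'age')
    print p.dict_query_string({'name': 'Sam', 'age': 20})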
|
|
from __future__ import division, unicode_literals
from ml.similarity import pre_process
from textblob import TextBlob as tb
import nltk, re, pprint
import nltk.chunk
from nltk.corpus import twitter_samples
from nltk import tag
from nltk.corpus import wordnet
from nltk.corpus.reader.wordnet import POS_LIST
import math
def tf(word, blob):
return blob.words.count(word) / len(blob.words)
def n_containing(word, bloblist):
return sum(1 for blob in bloblist if word in blob.words)
def idf(word, bloblist):
y = (1 + n_containing(word, bloblist))
x = math.log(len(bloblist) / y)
return x
def tfidf(word, blob, bloblist):
_tf = tf(word, blob)
_idf = idf(word, bloblist)
return _tf * _idf
def tag_override(sent):
force_tags = {'crashes': 'VBZ'}
tagged_words = nltk.pos_tag(sent)
new_tagged_words = [(word, force_tags.get(word, tag)) for word, tag in tagged_words]
return new_tagged_words
def get_pos(tag):
if tag:
s = str(tag).lower()[0]
if s in POS_LIST:
return s
return None
def transform(tree):
return (tree.leaves(), tree.label())
def disambiguity(word, tag):
return word
# words = wordnet.synsets(word, get_pos(tag))
# if words:
# value = words[0]
# return value._lemma_names[0]
# else:
# return word
class TestNLTK(object):
def setup(self):
pass
def teardown(self):
pass
def test_named_entity_extraction(self):
data = [
"""
If you are navigating through the list of open tabs inside the All+Tabs panel and you wanna filter by a term you have to select the search text field first.
It would be nice if any entered character is automatically routed to the search field and the filter gets applied.
""",
"""
In maximized mode there is something like 3 pixels padding on the right side of "All tabs" panel.
It doesn't exist on the left side of panel and in not maximized mode.
""",
"""
When you have the All+Tabs panel open it would be great if you can press Cmd/Ctrl+F to focus the search text field. Right now the panel gets hidden and the Find toolbar is shown without focus.
IMO using the command inside the All+Tabs panel would make more sense.
""",
"""
Steps to reproduce:
Nothing... had multiple windows and tiles open... for about 4 hours
Actual results:
Crashed without warning
""",
"""
Firefox crashes at leat 6 times a day. Installed latest version but still crashing. Goes very slow before it crashes.
""",
"""
Steps to reproduce:
W have installed Firefox 18 (as we did with all previous version) on Solaris 10 SPAC 64b
Actual results:
When we tried to start it form a console, it crashed with a message: Segmentation fault.
And it produced a core dump
Expected results:
Firefox should have open correctly
"""
]
analyses = [
""""All Tabs" panel code removed in bug 670684""",
""""All Tabs" panel code removed in bug 670684""",
""""All Tabs" panel code removed in bug 670684""",
"""
Please provide the crash ID from about:crashes.
Are you able to reproduce it consistently?
""",
"""Please post the related Report IDs from about:crashes.""",
"""
Do you see crash report IDs in about:crashes?
""",
"""
I agree with the utility of this feature it's just going to take some serious work. This is a pretty hacky area of the code that is screaming for a rewrite.
I'll slate this for 1.1
""",
"""
This is caused by bug 479393:
We are expecting that CERT_VerifyCertificateNow will return SECSuccess if at least one of the given usages is valid. However, it actually returns SECFailure unless all of the given usages are valid. I remember that Kai's original patch was correctly ignoring the return value of CERT_VerifyCertificateNow, but he "corrected" it when I told him we should check the return value. My bad. :(
The fix is simple: restore the error detection logic to the way it was done before:
"""
]
processed_data = [pre_process(text) for text in data]
example = processed_data[0]
chunked = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(example)), binary=True)
# example = "WASHINGTON -- In the wake of a string of abuses by New York police officers in the 1990s, Loretta E. Lynch, the top federal prosecutor in Brooklyn, spoke forcefully about the pain of a broken trust that African-Americans felt and said the responsibility for repairing generations of miscommunication and mistrust fell to law enforcement."
# print(chunked)
grammar = r"""
NP: {<DT|PP\$>?<JJ>*<NN>} # chunk determiner/possessive, adjectives and noun
{<NNP>+} # chunk sequences of proper nouns
VP: {<VB|VBD|VBG|VBN|VBP|VBZ>}
"""
cp = nltk.RegexpParser(grammar)
sentences = nltk.sent_tokenize(example)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
# print cp.parse(sentences[0])
# print '\nExample 1'
# tree = cp.parse(sentences[0])
# for subtree in tree.subtrees():
# if subtree.label() == 'NP': print(subtree)
#
# print '\nExample 2'
# example = processed_data[1]
# sentences = nltk.sent_tokenize(example)
# sentences = [nltk.word_tokenize(sent) for sent in sentences]
# sentences = [nltk.pos_tag(sent) for sent in sentences]
# tree = cp.parse(sentences[0])
# for subtree in tree.subtrees():
# if subtree.label() == 'NP': print(subtree)
#
# print '\nExample 3'
# example = processed_data[2]
# sentences = nltk.sent_tokenize(example)
# sentences = [nltk.word_tokenize(sent) for sent in sentences]
# sentences = [nltk.pos_tag(sent) for sent in sentences]
# tree = cp.parse(sentences[0])
# for subtree in tree.subtrees():
# if subtree.label() == 'NP': print(subtree)
# print '\nExample 4'
# stack4 = []
# example = processed_data[3]
# sentences = nltk.sent_tokenize(example)
# sentences = [nltk.word_tokenize(sent) for sent in sentences]
# sentences = [tag_override(sent) for sent in sentences]
#
# for sentence in sentences:
# tree = cp.parse(sentence)
# print tree
# print '\n----\n'
# for subtree in tree.subtrees():
# if subtree.label() == 'NP':
# stack4.append(transform(subtree))
# if subtree.label() == 'VP':
# stack4.append(transform(subtree))
print '\nExample 5'
stack5 = []
example = processed_data[4]
sentences = nltk.sent_tokenize(example)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
# print sentences
sentences = [tag_override(sent) for sent in sentences]
# TODO train my pos tagger on a bugzilla corpora ??
for sentence in sentences:
tree = cp.parse(sentence)
print tree
print '\n----\n'
for subtree in tree.subtrees():
if subtree.label() == 'NP':
stack5.append(transform(subtree))
# TODO: for now, let us focus on named entities
# if subtree.label() == 'VP':
# stack5.append(transform(subtree))
noum_verb_data = []
for current_problem in data:
            sentences = nltk.sent_tokenize(current_problem)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [tag_override(sent) for sent in sentences]
filtered_terms = []
for sentence in sentences:
tree = cp.parse(sentence)
for subtree in tree.subtrees():
if subtree.label() == 'NP':
filtered_terms.append(transform(subtree))
if subtree.label() == 'VP':
filtered_terms.append(transform(subtree))
final_terms = []
for tree, tag in filtered_terms:
for (word, w_tag) in tree:
                    if str(w_tag).startswith('NN') or str(w_tag).startswith('VB'):
                        final_terms.append(word)
noum_verb_data.append(' '.join([str(word) for word in final_terms]))
print '\nExample 6'
stack6 = []
example = processed_data[5]
sentences = nltk.sent_tokenize(example)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [tag_override(sent) for sent in sentences]
for sentence in sentences:
tree = cp.parse(sentence)
print tree
print '\n----\n'
for subtree in tree.subtrees():
if subtree.label() == 'NP':
print "appending {}".format(transform(subtree))
stack6.append(transform(subtree))
# TODO: for now, let us focus on named entities
# if subtree.label() == 'VP':
# print "appending {}".format(transform(subtree))
# stack6.append(transform(subtree))
print '\nAnalyses'
analysis = []
example = analyses[5]
sentences = nltk.sent_tokenize(example)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
# print sentences
sentences = [tag_override(sent) for sent in sentences]
# TODO train my pos tagger on a bugzilla corpora ??
for sentence in sentences:
tree = cp.parse(sentence)
print tree
print '\n----\n'
for subtree in tree.subtrees():
if subtree.label() == 'NP':
analysis.append(transform(subtree))
# print stack5
print stack5
print
print stack6
print
print analysis
filtered_stack5 = []
filtered_stack6 = []
filtered_analysis = []
for tree, tag in stack5:
filtered_stack5 += tree
for tree, tag in stack6:
filtered_stack6 += tree
for tree, tag in analysis:
filtered_analysis += tree
print
print '\n\nNamed entities and verbs :: analyses'
print ' '.join([disambiguity(word, tag) for (word, tag) in filtered_analysis])
print
print 'Named entities and verbs :: filtered_stack5'
print ' '.join([disambiguity(word, tag) for (word, tag) in filtered_stack5])
print
print '\n\nNamed entities and verbs :: filtered_stack6'
print ' '.join([disambiguity(word, tag) for (word, tag) in filtered_stack6])
# bloblist = [
# tb(' '.join([wordnet.synsets(word, get_pos(tag)) for (word, tag) in filtered_stack5])),
# tb(' '.join([wordnet.synsets(word, get_pos(tag)) for (word, tag) in filtered_stack6]))
# ]
#
# for i, blob in enumerate(bloblist):
# print("Top words in document {}".format(i + 1))
# scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
# sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
# for word, score in sorted_words[:7]:
# print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
# print '----'
# similar = []
# for s1 in stack5:
# n1 = s1[0][0][0]
# t1 = s1[1]
# if t1 == 'NP' or t1 == 'VP':
# for s2 in stack6:
# n2 = s2[0][0][0]
# t2 = s2[1]
# if t2 == 'NP' or t2 == 'VP':
# print "({}, {}) ({}, {})".format(n1, t1, n2, t2)
# wordFromList1 = wordnet.synsets(n1, get_pos(t1))
# wordFromList2 = wordnet.synsets(n2, get_pos(t2))
# print len(wordFromList1)
# print len(wordFromList2)
# if wordFromList1 and wordFromList2: # Thanks to @alexis' note
# s = wordFromList1[0].wup_similarity(wordFromList2[0])
# similar.append({'similarity': s, 'w1': n1, 'w2': n2})
#
#
#
#
# for s in similar:
# if s['similarity'] > 0.5:
# print s
# prev = None
# continuous_chunk = []
#
# current_chunk = []
#
# for i in chunked:
# if type(i) == nltk.tree.Tree:
# current_chunk.append(" ".join([token for token, pos in i.leaves()]))
# elif current_chunk:
# named_entity = " ".join(current_chunk)
#
# if named_entity not in continuous_chunk:
# continuous_chunk.append(named_entity)
# current_chunk = []
# else:
# continue
#
# print continuous_chunk
# sentences = nltk.sent_tokenize(example)
# sentences = [nltk.word_tokenize(sent) for sent in sentences]
# sentences = [nltk.pos_tag(sent) for sent in sentences]
#
# print(nltk.ne_chunk(sentences, binary=True))
|
|
import importlib
from django import forms
from django.contrib.auth.models import AnonymousUser, Permission, User
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.template import Context, Template
from django.views.generic.base import View
from djblets.features.testing import override_feature_check
from djblets.testing.decorators import add_fixtures
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import DefaultReviewer, Group
from reviewboard.site.context_processors import AllPermsWrapper
from reviewboard.site.middleware import LocalSiteMiddleware
from reviewboard.site.mixins import (CheckLocalSiteAccessViewMixin,
LocalSiteAwareModelFormMixin)
from reviewboard.site.models import LocalSite
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing.testcase import TestCase
class BasicTests(TestCase):
"""Tests basic LocalSite functionality"""
fixtures = ['test_users', 'test_site']
def test_access(self):
"""Test LocalSite.is_accessible_by"""
doc = User.objects.get(username="doc")
dopey = User.objects.get(username="dopey")
site = LocalSite.objects.get(name="local-site-1")
self.assertTrue(site.is_accessible_by(doc))
self.assertFalse(site.is_accessible_by(dopey))
def test_access_with_public(self):
"""Test LocalSite.is_accessible_by with public LocalSites"""
doc = User.objects.get(username="doc")
dopey = User.objects.get(username="dopey")
site = LocalSite.objects.get(name="local-site-1")
site.public = True
self.assertTrue(site.is_accessible_by(doc))
self.assertTrue(site.is_accessible_by(dopey))
def test_local_site_reverse_with_no_local_site(self):
"""Testing local_site_reverse with no local site"""
request = HttpRequest()
self.assertEqual(local_site_reverse('dashboard'),
'/dashboard/')
self.assertEqual(local_site_reverse('dashboard', request=request),
'/dashboard/')
self.assertEqual(local_site_reverse('user', args=['sample-user']),
'/users/sample-user/')
self.assertEqual(
local_site_reverse('user', kwargs={'username': 'sample-user'}),
'/users/sample-user/')
def test_local_site_reverse_with_local_site(self):
"""Testing local_site_reverse with a local site"""
request = HttpRequest()
request.GET['local_site_name'] = 'test'
self.assertEqual(local_site_reverse('dashboard', request=request),
'/dashboard/')
self.assertEqual(local_site_reverse('user', args=['sample-user'],
request=request),
'/users/sample-user/')
self.assertEqual(
local_site_reverse('user', kwargs={'username': 'sample-user'},
request=request),
'/users/sample-user/')
class LocalSiteMiddlewareTests(TestCase):
"""Unit tests for reviewboard.site.middleware.LocalSiteMiddleware."""
def setUp(self):
super(LocalSiteMiddlewareTests, self).setUp()
        self.middleware = LocalSiteMiddleware(lambda request: HttpResponse(''))
def test_request_local_site_empty(self):
"""Testing LocalSiteMiddleware's request.local_site with no LocalSite
"""
request = HttpRequest()
self.middleware.process_view(request=request, view_func=None,
view_args=None, view_kwargs={})
self.assertTrue(hasattr(request, '_local_site_name'))
self.assertTrue(hasattr(request, 'local_site'))
self.assertIsNone(request._local_site_name)
self.assertIsNone(request.local_site)
def test_request_local_site_not_empty(self):
"""Testing LocalSiteMiddleware's request.local_site with a LocalSite"""
local_site = LocalSite.objects.create(name='test-site')
request = HttpRequest()
self.middleware.process_view(
request=request,
view_func=None,
view_args=None,
view_kwargs={
'local_site_name': local_site.name,
})
self.assertTrue(hasattr(request, '_local_site_name'))
self.assertTrue(hasattr(request, 'local_site'))
self.assertEqual(request._local_site_name, 'test-site')
self.assertEqual(request.local_site, local_site)
class PermissionWrapperTests(TestCase):
"""Testing the LocalSite-aware permissions wrapper."""
def setUp(self):
super(PermissionWrapperTests, self).setUp()
self.user = User.objects.get(username='doc')
self.assertFalse(self.user.is_superuser)
@add_fixtures(['test_users', 'test_site'])
def test_lookup_global_permission(self):
"""Testing AllPermsWrapper with global permission lookup"""
self.user.user_permissions.add(
Permission.objects.get(codename='delete_reviewrequest'))
perms = AllPermsWrapper(self.user, self.local_site_name)
self.assertIn('reviews.delete_reviewrequest', perms)
self.assertNotIn('reviews.fake_permission', perms)
@add_fixtures(['test_users', 'test_site'])
def test_lookup_site_permission(self):
"""Testing AllPermsWrapper with site permission lookup"""
local_site = LocalSite.objects.get(name=self.local_site_name)
local_site_profile = self.user.get_site_profile(local_site)
local_site_profile.permissions['reviews.can_change_status'] = True
local_site_profile.save(update_fields=('permissions',))
perms = AllPermsWrapper(self.user, self.local_site_name)
self.assertIn('reviews.can_change_status', perms)
self.assertNotIn('reviews.fake_permission', perms)
class AdminPermissionTests(TestCase):
fixtures = ['test_users', 'test_site']
def setUp(self):
super(AdminPermissionTests, self).setUp()
self.user = User.objects.get(username='doc')
self.assertFalse(self.user.is_superuser)
self.local_site = LocalSite.objects.get(name=self.local_site_name)
self.local_site.admins.add(self.user)
def test_assigned_permissions(self):
"""Testing LocalSite assigned admin permissions"""
self.assertTrue(self.user.has_perm(
'hostingsvcs.change_hostingserviceaccount', self.local_site))
self.assertTrue(self.user.has_perm(
'hostingsvcs.create_hostingserviceaccount', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.can_change_status', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.can_edit_reviewrequest', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.can_submit_as_another_user', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.change_default_reviewer', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.add_group', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.change_group', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.delete_file', self.local_site))
self.assertTrue(self.user.has_perm(
'reviews.delete_screenshot', self.local_site))
self.assertTrue(self.user.has_perm(
'scmtools.add_repository', self.local_site))
self.assertTrue(self.user.has_perm(
'scmtools.change_repository', self.local_site))
def test_invalid_permissions(self):
"""Testing LocalSite invalid admin permissions"""
self.assertFalse(self.user.has_perm(
'reviews.delete_reviewrequest', self.local_site))
self.assertFalse(self.user.has_perm(
'dummy.permission', self.local_site))
class TemplateTagTests(TestCase):
def test_local_site_url_with_no_local_site(self):
"""Testing localsite's {% url %} with no local site"""
context = Context({})
t = Template('{% url "dashboard" %}')
self.assertEqual(t.render(context), '/dashboard/')
t = Template('{% url "user" "sample-user" %}')
self.assertEqual(t.render(context), '/users/sample-user/')
def test_local_site_url_with_local_site(self):
"""Testing localsite's {% url %} with local site"""
# Make sure that {% url %} is registered as a built-in tag.
importlib.import_module('reviewboard.site.templatetags')
context = Context({
'local_site_name': 'test',
})
t = Template('{% url "dashboard" %}')
self.assertEqual(t.render(context), '/s/test/dashboard/')
t = Template('{% url "user" "sample-user" %}')
self.assertEqual(t.render(context), '/s/test/users/sample-user/')
class CheckLocalSiteAccessViewMixinTests(TestCase):
"""Unit tests for CheckLocalSiteAccessViewMixin."""
@add_fixtures(['test_site', 'test_users'])
def test_dispatch_with_local_site_and_allowed(self):
"""Testing CheckLocalSiteAccessViewMixin.dispatch with LocalSite and
access allowed
"""
class MyView(CheckLocalSiteAccessViewMixin, View):
def get(view, *args, **kwargs):
self.assertIsNotNone(view.local_site)
self.assertEqual(view.local_site.name, 'local-site-1')
return HttpResponse('success')
local_site = self.get_local_site(self.local_site_name)
request = self.create_http_request(user=local_site.users.all()[0],
local_site=local_site)
view = MyView.as_view()
response = view(request, local_site_name=local_site.name)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'success')
@add_fixtures(['test_site', 'test_users'])
def test_dispatch_with_local_site_and_not_allowed(self):
"""Testing CheckLocalSiteAccessViewMixin.dispatch with LocalSite and
access not allowed
"""
class MyView(CheckLocalSiteAccessViewMixin, View):
def get(view, *args, **kwargs):
self.assertIsNotNone(view.local_site)
self.assertEqual(view.local_site.name, 'local-site-1')
return HttpResponse('success')
view = MyView.as_view()
local_site = self.get_local_site(self.local_site_name)
request = self.create_http_request(
user=User.objects.create_user(username='test123',
email='[email protected]'),
local_site=local_site,
view=view)
response = view(request, local_site_name=local_site.name)
self.assertEqual(response.status_code, 403)
@add_fixtures(['test_site'])
def test_dispatch_with_local_site_and_anonymous(self):
"""Testing CheckLocalSiteAccessViewMixin.dispatch with LocalSite and
anonymous user
"""
class MyView(CheckLocalSiteAccessViewMixin, View):
def get(view, *args, **kwargs):
self.assertIsNotNone(view.local_site)
self.assertEqual(view.local_site.name, 'local-site-1')
return HttpResponse('success')
view = MyView.as_view()
local_site = self.get_local_site(self.local_site_name)
request = self.create_http_request(local_site=local_site,
view=view)
response = view(request, local_site_name=local_site.name)
self.assertIsInstance(response, HttpResponseRedirect)
@add_fixtures(['test_site', 'test_users'])
def test_dispatch_with_no_local_site(self):
"""Testing CheckLocalSiteAccessViewMixin.dispatch with no LocalSite"""
class MyView(CheckLocalSiteAccessViewMixin, View):
def get(view, *args, **kwargs):
self.assertIsNone(view.local_site)
return HttpResponse('success')
view = MyView.as_view()
request = self.create_http_request(
user=User.objects.get(username='doc'),
view=view)
response = view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'success')
class OAuth2ApplicationTests(TestCase):
"""Testing Applicications assigned to a Local Site."""
fixtures = ['test_users', 'test_site']
def test_disable_reassign_to_admin(self):
"""Testing an Application is disabled and re-assigned to a Local Site
admin when its owner is removed from a Local Site
"""
with override_feature_check(oauth2_service_feature.feature_id, True):
local_site = LocalSite.objects.get(pk=1)
user = User.objects.get(username='doc')
admin = User.objects.get(username='admin')
application = self.create_oauth_application(user=user,
local_site=local_site)
local_site.users.remove(user)
application = Application.objects.get(pk=application.pk)
self.assertTrue(application.is_disabled_for_security)
self.assertEqual(application.original_user_id, user.pk)
self.assertEqual(application.user_id, admin.pk)
self.assertFalse(application.enabled)
class LocalSiteAwareModelFormMixinTests(TestCase):
"""Unit tests for LocalSiteAwareModelFormMixin."""
class MyForm(LocalSiteAwareModelFormMixin, forms.ModelForm):
users = forms.ModelMultipleChoiceField(
queryset=User.objects.filter(is_active=True))
inactive_user = forms.ModelChoiceField(
queryset=User.objects.filter(is_active=False))
default_reviewer = forms.ModelChoiceField(
queryset=DefaultReviewer.objects.all())
class Meta:
model = Group
fields = '__all__'
def setUp(self):
super(LocalSiteAwareModelFormMixinTests, self).setUp()
self.global_user = User.objects.create(username='global-user')
self.site_user = User.objects.create(username='site-user')
self.inactive_global_user = User.objects.create(
username='inactive-global-user',
is_active=False)
self.inactive_site_user = User.objects.create(
username='inactive-site-user',
is_active=False)
self.local_site = LocalSite.objects.create(name='site1')
self.local_site.users.add(self.site_user, self.inactive_site_user)
self.global_default_reviewer = DefaultReviewer.objects.create(
name='global-default-reviewer',
file_regex='.')
self.site_default_reviewer = DefaultReviewer.objects.create(
name='site-default-reviewer',
file_regex='.',
local_site=self.local_site)
def test_without_localsite(self):
"""Testing LocalSiteAwareModelFormMixin without a LocalSite"""
# Make sure the initial state and querysets are what we expect on init.
form = self.MyForm()
self.assertIsNone(form.limited_to_local_site)
self.assertIn('local_site', form.fields)
self.assertEqual(list(form.fields['users'].queryset),
[self.global_user, self.site_user])
self.assertEqual(list(form.fields['inactive_user'].queryset),
[self.inactive_global_user, self.inactive_site_user])
self.assertEqual(list(form.fields['default_reviewer'].queryset),
[self.global_default_reviewer,
self.site_default_reviewer])
# Now test what happens when it's been fed data and validated.
form = self.MyForm(data={
'name': 'test-group',
'display_name': 'Test Group',
'users': [self.global_user.pk],
'inactive_user': self.inactive_global_user.pk,
'default_reviewer': self.global_default_reviewer.pk,
})
self.assertIsNone(form.limited_to_local_site)
self.assertIn('local_site', form.fields)
self.assertEqual(list(form.fields['users'].queryset),
[self.global_user, self.site_user])
self.assertEqual(list(form.fields['inactive_user'].queryset),
[self.inactive_global_user, self.inactive_site_user])
self.assertEqual(list(form.fields['default_reviewer'].queryset),
[self.global_default_reviewer,
self.site_default_reviewer])
        self.assertTrue(form.is_valid())
# Make sure any overridden querysets have been restored, so users can
# still change entries.
self.assertEqual(list(form.fields['users'].queryset),
[self.global_user, self.site_user])
self.assertEqual(list(form.fields['inactive_user'].queryset),
[self.inactive_global_user, self.inactive_site_user])
self.assertEqual(list(form.fields['default_reviewer'].queryset),
[self.global_default_reviewer,
self.site_default_reviewer])
new_group = form.save()
self.assertEqual(list(new_group.users.all()), [self.global_user])
self.assertIsNone(new_group.local_site_id)
def test_without_localsite_and_edit_instance(self):
"""Testing LocalSiteAwareModelFormMixin without a LocalSite and
editing an instance
"""
group = self.create_review_group()
form = self.MyForm(
data={
'name': 'new-group',
'display_name': 'New Group',
'users': [self.global_user.pk],
'inactive_user': self.inactive_global_user.pk,
'default_reviewer': self.global_default_reviewer.pk,
},
instance=group)
self.assertTrue(form.is_valid())
new_group = form.save()
self.assertEqual(group.pk, new_group.pk)
self.assertIsNone(new_group.local_site_id)
def test_without_localsite_and_with_compatible_rel_values(self):
"""Testing LocalSiteAwareModelFormMixin without a LocalSite and
compatible relation model values
"""
# Note that Users are compatible even if on a Local Site, so long
# as the form's model instance is not on a Local Site. However,
# the DefaultReviewer is not compatible.
form = self.MyForm(data={
'name': 'new-group',
'display_name': 'New Group',
'users': [self.site_user.pk],
'inactive_user': self.inactive_site_user.pk,
'default_reviewer': self.global_default_reviewer.pk,
})
self.assertTrue(form.is_valid())
def test_without_localsite_and_with_incompatible_rel_values(self):
"""Testing LocalSiteAwareModelFormMixin without a LocalSite and
incompatible relation model values
"""
form = self.MyForm(data={
'name': 'new-group',
'display_name': 'New Group',
'users': [self.site_user.pk],
'inactive_user': self.inactive_site_user.pk,
'default_reviewer': self.site_default_reviewer.pk,
})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
'default_reviewer': [
'Select a valid choice. That choice is not one of the '
'available choices.',
],
})
def test_with_limited_localsite(self):
"""Testing LocalSiteAwareModelFormMixin limited to a LocalSite"""
form = self.MyForm(limit_to_local_site=self.local_site)
self.assertIs(form.limited_to_local_site, self.local_site)
self.assertNotIn('local_site', form.fields)
self.assertEqual(list(form.fields['users'].queryset),
[self.site_user])
self.assertEqual(list(form.fields['inactive_user'].queryset),
[self.inactive_site_user])
self.assertEqual(list(form.fields['default_reviewer'].queryset),
[self.site_default_reviewer])
def test_with_limited_localsite_and_changing_site(self):
"""Testing LocalSiteAwareModelFormMixin limited to a LocalSite and
LocalSite in form data ignored
"""
site2 = LocalSite.objects.create(name='test-site-2')
form = self.MyForm(
data={
'name': 'new-group',
'display_name': 'New Group',
'users': [self.site_user.pk],
'inactive_user': self.inactive_site_user.pk,
'default_reviewer': self.site_default_reviewer.pk,
'local_site': site2.pk,
},
limit_to_local_site=self.local_site)
self.assertIs(form.limited_to_local_site, self.local_site)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['local_site'], self.local_site)
group = form.save()
self.assertEqual(group.local_site, self.local_site)
def test_with_limited_localsite_and_compatible_instance(self):
"""Testing LocalSiteAwareModelFormMixin limited to a LocalSite and
editing compatible instance
"""
group = self.create_review_group(local_site=self.local_site)
# This should just simply not raise an exception.
self.MyForm(instance=group,
limit_to_local_site=self.local_site)
def test_with_limited_localsite_and_incompatible_instance(self):
"""Testing LocalSiteAwareModelFormMixin limited to a LocalSite and
editing incompatible instance
"""
group = self.create_review_group()
error_message = (
'The provided instance is not associated with a LocalSite '
'compatible with this form. Please contact support.'
)
        # This should raise, since the instance is not on the LocalSite the
        # form is limited to.
with self.assertRaisesMessage(ValueError, error_message):
self.MyForm(instance=group,
limit_to_local_site=self.local_site)
def test_with_limited_localsite_and_incompatible_rel_values(self):
"""Testing LocalSiteAwareModelFormMixin limited to a LocalSite and
incompatible relation model values
"""
form = self.MyForm(
data={
'name': 'new-group',
'display_name': 'New Group',
'users': [self.global_user.pk],
'inactive_user': self.inactive_global_user.pk,
'default_reviewer': self.global_default_reviewer.pk,
},
limit_to_local_site=self.local_site)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
'default_reviewer': [
'Select a valid choice. That choice is not one of the '
'available choices.',
],
'inactive_user': [
'Select a valid choice. That choice is not one of the '
'available choices.',
],
'users': [
'Select a valid choice. 1 is not one of the available '
'choices.',
],
})
def test_with_localsite_in_data(self):
"""Testing LocalSiteAwareModelFormMixin with a LocalSite in form data
"""
# Make sure the initial state and querysets are what we expect on init.
form = self.MyForm()
self.assertIsNone(form.limited_to_local_site)
self.assertIn('local_site', form.fields)
self.assertEqual(list(form.fields['users'].queryset),
[self.global_user, self.site_user])
self.assertEqual(list(form.fields['inactive_user'].queryset),
[self.inactive_global_user, self.inactive_site_user])
self.assertEqual(list(form.fields['default_reviewer'].queryset),
[self.global_default_reviewer,
self.site_default_reviewer])
# Now test what happens when it's been fed data and validated.
form = self.MyForm(data={
'name': 'new-group',
'display_name': 'New Group',
'local_site': self.local_site.pk,
'users': [self.site_user.pk],
'inactive_user': self.inactive_site_user.pk,
'default_reviewer': self.site_default_reviewer.pk,
})
self.assertIsNone(form.limited_to_local_site)
self.assertTrue(form.is_valid())
self.assertIn('local_site', form.fields)
self.assertEqual(form.cleaned_data['local_site'], self.local_site)
# Make sure any overridden querysets have been restored, so users can
# still change entries.
self.assertEqual(list(form.fields['users'].queryset),
[self.global_user, self.site_user])
self.assertEqual(list(form.fields['inactive_user'].queryset),
[self.inactive_global_user, self.inactive_site_user])
self.assertEqual(list(form.fields['default_reviewer'].queryset),
[self.global_default_reviewer,
self.site_default_reviewer])
group = form.save()
self.assertEqual(group.local_site, self.local_site)
self.assertEqual(list(group.users.all()), [self.site_user])
def test_with_localsite_in_data_and_edit_instance(self):
"""Testing LocalSiteAwareModelFormMixin with a LocalSite in form data
and editing instance
"""
group = self.create_review_group()
form = self.MyForm(
data={
'name': 'new-group',
'display_name': 'New Group',
'local_site': self.local_site.pk,
'users': [self.site_user.pk],
'inactive_user': self.inactive_site_user.pk,
'default_reviewer': self.site_default_reviewer.pk,
},
instance=group)
self.assertTrue(form.is_valid())
new_group = form.save()
self.assertEqual(new_group.pk, group.pk)
self.assertEqual(new_group.local_site, self.local_site)
self.assertEqual(list(new_group.users.all()), [self.site_user])
def test_with_localsite_in_data_and_incompatible_rel_values(self):
"""Testing LocalSiteAwareModelFormMixin with a LocalSite in form data
and incompatible relation model values
"""
form = self.MyForm(data={
'name': 'new-group',
'display_name': 'New Group',
'local_site': self.local_site.pk,
'users': [self.global_user.pk],
'inactive_user': self.inactive_global_user.pk,
'default_reviewer': self.global_default_reviewer.pk,
})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
'default_reviewer': [
'Select a valid choice. That choice is not one of the '
'available choices.',
],
'inactive_user': [
'Select a valid choice. That choice is not one of the '
'available choices.',
],
'users': [
'Select a valid choice. 1 is not one of the available '
'choices.',
],
})
def test_with_localsite_in_data_with_bad_value(self):
"""Testing LocalSiteAwareModelFormMixin with a LocalSite in form data
and ID is a non-integer
"""
# This should just not crash.
form = self.MyForm(data={
'name': 'new-group',
'display_name': 'New Group',
'local_site': 'abc',
})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['local_site'],
[
'Select a valid choice. That choice is not one of the '
'available choices.',
])
|
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import textwrap
import pretend
import pytest
from werkzeug.exceptions import NotFound
from warehouse.packaging.views import project_detail
def test_project_detail_missing_project():
app = pretend.stub(
db=pretend.stub(
packaging=pretend.stub(
get_project=pretend.call_recorder(lambda proj: None),
),
),
)
request = pretend.stub()
project_name = "test-project"
with pytest.raises(NotFound):
project_detail(app, request, project_name)
assert app.db.packaging.get_project.calls == [
pretend.call("test-project"),
]
def test_project_detail_no_versions():
app = pretend.stub(
db=pretend.stub(
packaging=pretend.stub(
get_project=pretend.call_recorder(
lambda proj: "test-project",
),
get_releases=pretend.call_recorder(lambda proj: []),
),
),
)
request = pretend.stub()
project_name = "test-project"
with pytest.raises(NotFound):
project_detail(app, request, project_name)
assert app.db.packaging.get_project.calls == [
pretend.call("test-project"),
]
assert app.db.packaging.get_releases.calls == [
pretend.call("test-project"),
]
def test_project_detail_redirects():
app = pretend.stub(
config=pretend.stub(
cache=pretend.stub(
browser=False,
varnish=False,
),
),
db=pretend.stub(
packaging=pretend.stub(
get_project=pretend.call_recorder(
lambda proj: "test-project",
),
get_releases=pretend.call_recorder(
lambda proj: [{"version": "1.0"}],
),
),
),
)
request = pretend.stub(
url_adapter=pretend.stub(
build=pretend.call_recorder(
lambda *a, **kw: "/projects/test-project/",
),
),
)
project_name = "test-Project"
normalized = "test-project"
resp = project_detail(app, request, project_name=project_name)
assert resp.status_code == 301
assert resp.headers["Location"] == "/projects/test-project/"
assert resp.headers["Surrogate-Key"] == \
"project-detail project-detail~{}".format(normalized)
assert app.db.packaging.get_project.calls == [
pretend.call("test-Project"),
]
assert app.db.packaging.get_releases.calls == [
pretend.call("test-project"),
]
assert request.url_adapter.build.calls == [
pretend.call(
"warehouse.packaging.views.project_detail",
{"project_name": "test-project", "version": None},
force_external=False,
),
]
def test_project_detail_invalid_version():
app = pretend.stub(
config=pretend.stub(
cache=pretend.stub(
browser=False,
varnish=False,
),
),
db=pretend.stub(
packaging=pretend.stub(
get_project=pretend.call_recorder(
lambda proj: "test-project",
),
get_releases=pretend.call_recorder(
lambda proj: [{"version": "1.0"}],
),
),
),
)
request = pretend.stub()
project_name = "test-project"
with pytest.raises(NotFound):
project_detail(app, request, project_name, "2.0")
assert app.db.packaging.get_project.calls == [
pretend.call("test-project"),
]
assert app.db.packaging.get_releases.calls == [
pretend.call("test-project"),
]
@pytest.mark.parametrize(("version", "description", "camo"), [
(
None,
textwrap.dedent("""
Test Project
============
This is a test project
"""),
None,
),
(
"1.0",
textwrap.dedent("""
Test Project
============
This is a test project
"""),
None,
),
(None, ".. code-fail::\n wat", None),
("1.0", ".. code-fail::\n wat", None),
(None, None, None),
("1.0", None, None),
(
None,
textwrap.dedent("""
Test Project
============
This is a test project
"""),
pretend.stub(url="https://camo.example.com/", key="secret key"),
),
(
"1.0",
textwrap.dedent("""
Test Project
============
This is a test project
"""),
pretend.stub(url="https://camo.example.com/", key="secret key"),
),
(
None,
".. code-fail::\n wat",
pretend.stub(url="https://camo.example.com/", key="secret key"),
),
(
"1.0",
".. code-fail::\n wat",
pretend.stub(url="https://camo.example.com/", key="secret key"),
),
(
None,
None,
pretend.stub(url="https://camo.example.com/", key="secret key"),
),
(
"1.0",
None,
pretend.stub(url="https://camo.example.com/", key="secret key"),
),
])
def test_project_detail_valid(version, description, camo):
release = {
"description": description,
}
template = pretend.stub(
render=pretend.call_recorder(lambda **ctx: ""),
)
app = pretend.stub(
config=pretend.stub(
cache=pretend.stub(
browser=False,
varnish=False,
),
camo=camo,
),
db=pretend.stub(
packaging=pretend.stub(
get_project=pretend.call_recorder(
lambda proj: "test-project",
),
get_releases=pretend.call_recorder(
lambda proj: [{"version": "2.0"}, {"version": "1.0"}],
),
get_release=pretend.call_recorder(
lambda proj, version: release,
),
get_download_counts=pretend.call_recorder(
lambda proj: {
"last_day": 1,
"last_week": 7,
"last_month": 30,
},
),
get_downloads=pretend.call_recorder(lambda proj, ver: []),
get_classifiers=pretend.call_recorder(lambda proj, ver: []),
get_documentation_url=pretend.call_recorder(
lambda proj: None,
),
get_bugtrack_url=pretend.call_recorder(lambda proj: None),
get_users_for_project=pretend.call_recorder(lambda proj: []),
),
),
templates=pretend.stub(
get_template=pretend.call_recorder(lambda t: template),
),
)
request = pretend.stub()
project_name = "test-project"
normalized = "test-project"
resp = project_detail(
app,
request,
project_name=project_name,
version=version,
)
assert resp.status_code == 200
assert resp.headers["Surrogate-Key"] == \
"project-detail project-detail~{}".format(normalized)
assert app.db.packaging.get_project.calls == [
pretend.call("test-project"),
]
assert app.db.packaging.get_releases.calls == [
pretend.call("test-project"),
]
assert app.db.packaging.get_users_for_project.calls == [
pretend.call("test-project"),
]
|
|
import enum
import json
import os
import re
import typing as t
from collections import abc
from collections import deque
from random import choice
from random import randrange
from threading import Lock
from types import CodeType
from urllib.parse import quote_from_bytes
import markupsafe
if t.TYPE_CHECKING:
import typing_extensions as te
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
# special singleton representing missing values for the runtime
missing: t.Any = type("MissingType", (), {"__repr__": lambda x: "missing"})()
internal_code: t.MutableSet[CodeType] = set()
concat = "".join
def pass_context(f: F) -> F:
"""Pass the :class:`~jinja2.runtime.Context` as the first argument
to the decorated function when called while rendering a template.
Can be used on functions, filters, and tests.
If only ``Context.eval_context`` is needed, use
:func:`pass_eval_context`. If only ``Context.environment`` is
needed, use :func:`pass_environment`.
.. versionadded:: 3.0.0
Replaces ``contextfunction`` and ``contextfilter``.
"""
f.jinja_pass_arg = _PassArg.context # type: ignore
return f
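# Hedged usage sketch (the filter below is illustrative, not part of this
# module): a function decorated with pass_context receives the render-time
# Context before its own arguments, e.g. when registered as an Environment
# filter.
def _pass_context_example() -> None:  # pragma: no cover - illustrative only
    @pass_context
    def resolve_or_default(context, name, default=""):
        # the Context behaves like a read-only mapping of template variables
        return context.get(name, default)
    # registering it would look like: env.filters["resolve"] = resolve_or_default
    assert _PassArg.from_obj(resolve_or_default) is _PassArg.context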
def pass_eval_context(f: F) -> F:
"""Pass the :class:`~jinja2.nodes.EvalContext` as the first argument
to the decorated function when called while rendering a template.
See :ref:`eval-context`.
Can be used on functions, filters, and tests.
If only ``EvalContext.environment`` is needed, use
:func:`pass_environment`.
.. versionadded:: 3.0.0
Replaces ``evalcontextfunction`` and ``evalcontextfilter``.
"""
f.jinja_pass_arg = _PassArg.eval_context # type: ignore
return f
def pass_environment(f: F) -> F:
"""Pass the :class:`~jinja2.Environment` as the first argument to
the decorated function when called while rendering a template.
Can be used on functions, filters, and tests.
.. versionadded:: 3.0.0
Replaces ``environmentfunction`` and ``environmentfilter``.
"""
f.jinja_pass_arg = _PassArg.environment # type: ignore
return f
class _PassArg(enum.Enum):
context = enum.auto()
eval_context = enum.auto()
environment = enum.auto()
@classmethod
def from_obj(cls, obj: F) -> t.Optional["_PassArg"]:
if hasattr(obj, "jinja_pass_arg"):
return obj.jinja_pass_arg # type: ignore
return None
def internalcode(f: F) -> F:
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj: t.Any) -> bool:
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from .runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable: t.Iterable[t.Any]) -> None:
"""Consumes an iterable without doing anything with it."""
for _ in iterable:
pass
def clear_caches() -> None:
"""Jinja keeps internal caches for environments and lexers. These are
used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
get_spontaneous_environment.cache_clear()
_lexer_cache.clear()
def import_string(import_name: str, silent: bool = False) -> t.Any:
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ":" in import_name:
module, obj = import_name.split(":", 1)
elif "." in import_name:
module, _, obj = import_name.rpartition(".")
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
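# Hedged usage sketch for import_string(): dotted and colon notation resolve to
# the same object (stdlib names used here so the example is self-contained).
def _import_string_example() -> None:  # pragma: no cover - illustrative only
    from xml.sax.saxutils import escape
    assert import_string("xml.sax.saxutils.escape") is escape
    assert import_string("xml.sax.saxutils:escape") is escape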
def open_if_exists(filename: str, mode: str = "rb") -> t.Optional[t.IO]:
"""Returns a file descriptor for the filename if that file exists,
otherwise ``None``.
"""
if not os.path.isfile(filename):
return None
return open(filename, mode)
def object_type_repr(obj: t.Any) -> str:
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return "None"
elif obj is Ellipsis:
return "Ellipsis"
cls = type(obj)
if cls.__module__ == "builtins":
return f"{cls.__name__} object"
return f"{cls.__module__}.{cls.__name__} object"
def pformat(obj: t.Any) -> str:
"""Format an object using :func:`pprint.pformat`."""
from pprint import pformat # type: ignore
return pformat(obj)
_http_re = re.compile(
r"""
^
(
(https?://|www\.) # scheme or www
(([\w%-]+\.)+)? # subdomain
(
[a-z]{2,63} # basic tld
|
xn--[\w%]{2,59} # idna tld
)
|
([\w%-]{2,63}\.)+ # basic domain
(com|net|int|edu|gov|org|info|mil) # basic tld
|
(https?://) # scheme
(
(([\d]{1,3})(\.[\d]{1,3}){3}) # IPv4
|
(\[([\da-f]{0,4}:){2}([\da-f]{0,4}:?){1,6}]) # IPv6
)
)
(?::[\d]{1,5})? # port
(?:[/?#]\S*)? # path, query, and fragment
$
""",
re.IGNORECASE | re.VERBOSE,
)
_email_re = re.compile(r"^\S+@\w[\w.-]*\.\w+$")
def urlize(
text: str,
trim_url_limit: t.Optional[int] = None,
rel: t.Optional[str] = None,
target: t.Optional[str] = None,
extra_schemes: t.Optional[t.Iterable[str]] = None,
) -> str:
"""Convert URLs in text into clickable links.
This may not recognize links in some situations. Usually, a more
comprehensive formatter, such as a Markdown library, is a better
choice.
Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
addresses. Links with trailing punctuation (periods, commas, closing
parentheses) and leading punctuation (opening parentheses) are
recognized excluding the punctuation. Email addresses that include
header fields are not recognized (for example,
    ``mailto:address@example.com?cc=copy@example.com``).
:param text: Original text containing URLs to link.
:param trim_url_limit: Shorten displayed URL values to this length.
:param target: Add the ``target`` attribute to links.
:param rel: Add the ``rel`` attribute to links.
:param extra_schemes: Recognize URLs that start with these schemes
in addition to the default behavior.
.. versionchanged:: 3.0
The ``extra_schemes`` parameter was added.
.. versionchanged:: 3.0
Generate ``https://`` links for URLs without a scheme.
.. versionchanged:: 3.0
The parsing rules were updated. Recognize email addresses with
or without the ``mailto:`` scheme. Validate IP addresses. Ignore
parentheses and brackets in more cases.
"""
if trim_url_limit is not None:
def trim_url(x: str) -> str:
if len(x) > trim_url_limit: # type: ignore
return f"{x[:trim_url_limit]}..."
return x
else:
def trim_url(x: str) -> str:
return x
words = re.split(r"(\s+)", str(markupsafe.escape(text)))
rel_attr = f' rel="{markupsafe.escape(rel)}"' if rel else ""
target_attr = f' target="{markupsafe.escape(target)}"' if target else ""
for i, word in enumerate(words):
head, middle, tail = "", word, ""
        match = re.match(r"^([(<]|&lt;)+", middle)
if match:
head = match.group()
middle = middle[match.end() :]
# Unlike lead, which is anchored to the start of the string,
# need to check that the string ends with any of the characters
# before trying to match all of them, to avoid backtracking.
        if middle.endswith((")", ">", ".", ",", "\n", "&gt;")):
            match = re.search(r"([)>.,\n]|&gt;)+$", middle)
if match:
tail = match.group()
middle = middle[: match.start()]
# Prefer balancing parentheses in URLs instead of ignoring a
# trailing character.
        for start_char, end_char in ("(", ")"), ("<", ">"), ("&lt;", "&gt;"):
start_count = middle.count(start_char)
if start_count <= middle.count(end_char):
# Balanced, or lighter on the left
continue
# Move as many as possible from the tail to balance
for _ in range(min(start_count, tail.count(end_char))):
end_index = tail.index(end_char) + len(end_char)
# Move anything in the tail before the end char too
middle += tail[:end_index]
tail = tail[end_index:]
if _http_re.match(middle):
if middle.startswith("https://") or middle.startswith("http://"):
middle = (
f'<a href="{middle}"{rel_attr}{target_attr}>{trim_url(middle)}</a>'
)
else:
middle = (
f'<a href="https://{middle}"{rel_attr}{target_attr}>'
f"{trim_url(middle)}</a>"
)
elif middle.startswith("mailto:") and _email_re.match(middle[7:]):
middle = f'<a href="{middle}">{middle[7:]}</a>'
elif (
"@" in middle
and not middle.startswith("www.")
and ":" not in middle
and _email_re.match(middle)
):
middle = f'<a href="mailto:{middle}">{middle}</a>'
elif extra_schemes is not None:
for scheme in extra_schemes:
if middle != scheme and middle.startswith(scheme):
middle = f'<a href="{middle}"{rel_attr}{target_attr}>{middle}</a>'
words[i] = f"{head}{middle}{tail}"
return "".join(words)
def generate_lorem_ipsum(
n: int = 5, html: bool = True, min: int = 20, max: int = 100
) -> str:
"""Generate some lorem ipsum for the template."""
from .constants import LOREM_IPSUM_WORDS
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
        # each paragraph contains 20 to 100 words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p_str = " ".join(p)
if p_str.endswith(","):
p_str = p_str[:-1] + "."
elif not p_str.endswith("."):
p_str += "."
result.append(p_str)
if not html:
return "\n\n".join(result)
return markupsafe.Markup(
"\n".join(f"<p>{markupsafe.escape(x)}</p>" for x in result)
)
def url_quote(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
"""Quote a string for use in a URL using the given charset.
:param obj: String or bytes to quote. Other types are converted to
string then encoded to bytes using the given charset.
:param charset: Encode text to bytes using this charset.
:param for_qs: Quote "/" and use "+" for spaces.
"""
if not isinstance(obj, bytes):
if not isinstance(obj, str):
obj = str(obj)
obj = obj.encode(charset)
safe = b"" if for_qs else b"/"
rv = quote_from_bytes(obj, safe)
if for_qs:
rv = rv.replace("%20", "+")
return rv
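# Illustrative values for url_quote() (assumed, not from this module): path
# quoting keeps "/", query-string quoting replaces spaces with "+" and also
# quotes "/".
def _url_quote_example() -> None:  # pragma: no cover - illustrative only
    assert url_quote("a b/c") == "a%20b/c"
    assert url_quote("a b/c", for_qs=True) == "a+b%2Fc"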
@abc.MutableMapping.register
class LRUCache:
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self._mapping: t.Dict[t.Any, t.Any] = {}
self._queue: "te.Deque[t.Any]" = deque()
self._postinit()
def _postinit(self) -> None:
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self) -> t.Mapping[str, t.Any]:
return {
"capacity": self.capacity,
"_mapping": self._mapping,
"_queue": self._queue,
}
def __setstate__(self, d: t.Mapping[str, t.Any]) -> None:
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self) -> t.Tuple:
return (self.capacity,)
def copy(self) -> "LRUCache":
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue.extend(self._queue)
return rv
def get(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key: t.Any, default: t.Any = None) -> t.Any:
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
try:
return self[key]
except KeyError:
self[key] = default
return default
def clear(self) -> None:
"""Clear the cache."""
with self._wlock:
self._mapping.clear()
self._queue.clear()
def __contains__(self, key: t.Any) -> bool:
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self) -> int:
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self) -> str:
return f"<{type(self).__name__} {self._mapping!r}>"
def __getitem__(self, key: t.Any) -> t.Any:
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
with self._wlock:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
def __setitem__(self, key: t.Any, value: t.Any) -> None:
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
with self._wlock:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
def __delitem__(self, key: t.Any) -> None:
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
with self._wlock:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
pass
def items(self) -> t.Iterable[t.Tuple[t.Any, t.Any]]:
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def values(self) -> t.Iterable[t.Any]:
"""Return a list of all values."""
return [x[1] for x in self.items()]
def keys(self) -> t.Iterable[t.Any]:
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def __iter__(self) -> t.Iterator[t.Any]:
return reversed(tuple(self._queue))
def __reversed__(self) -> t.Iterator[t.Any]:
"""Iterate over the keys in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
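# Minimal usage sketch for LRUCache (keys and capacity chosen for
# illustration): the least recently used key is evicted once the capacity is
# exceeded, and reads refresh an entry's position in the queue.
def _lru_cache_example() -> None:  # pragma: no cover - illustrative only
    cache = LRUCache(capacity=2)
    cache["a"] = 1
    cache["b"] = 2
    cache["a"]  # touching "a" makes "b" the oldest entry
    cache["c"] = 3  # evicts "b"
    assert "b" not in cache and cache.get("a") == 1 and cache["c"] == 3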
def select_autoescape(
enabled_extensions: t.Collection[str] = ("html", "htm", "xml"),
disabled_extensions: t.Collection[str] = (),
default_for_string: bool = True,
default: bool = False,
) -> t.Callable[[t.Optional[str]], bool]:
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
The `enabled_extensions` is an iterable of all the extensions that
autoescaping should be enabled for. Likewise `disabled_extensions` is
a list of all templates it should be disabled for. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case insensitive.
.. versionadded:: 2.9
"""
enabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in enabled_extensions)
disabled_patterns = tuple(f".{x.lstrip('.').lower()}" for x in disabled_extensions)
def autoescape(template_name: t.Optional[str]) -> bool:
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape
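# Illustrative check (filenames assumed) of the callable returned by
# select_autoescape(): matching is case-insensitive and falls back to the
# defaults for non-matching or string-loaded templates.
def _select_autoescape_example() -> None:  # pragma: no cover - illustrative only
    autoescape = select_autoescape(enabled_extensions=("html",), default=False)
    assert autoescape("page.HTML") is True
    assert autoescape("notes.txt") is False
    assert autoescape(None) is True  # default_for_string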
def htmlsafe_json_dumps(
obj: t.Any, dumps: t.Optional[t.Callable[..., str]] = None, **kwargs: t.Any
) -> markupsafe.Markup:
"""Serialize an object to a string of JSON with :func:`json.dumps`,
then replace HTML-unsafe characters with Unicode escapes and mark
the result safe with :class:`~markupsafe.Markup`.
This is available in templates as the ``|tojson`` filter.
The following characters are escaped: ``<``, ``>``, ``&``, ``'``.
The returned string is safe to render in HTML documents and
``<script>`` tags. The exception is in HTML attributes that are
double quoted; either use single quotes or the ``|forceescape``
filter.
:param obj: The object to serialize to JSON.
:param dumps: The ``dumps`` function to use. Defaults to
``env.policies["json.dumps_function"]``, which defaults to
:func:`json.dumps`.
:param kwargs: Extra arguments to pass to ``dumps``. Merged onto
``env.policies["json.dumps_kwargs"]``.
.. versionchanged:: 3.0
The ``dumper`` parameter is renamed to ``dumps``.
.. versionadded:: 2.9
"""
if dumps is None:
dumps = json.dumps
return markupsafe.Markup(
dumps(obj, **kwargs)
.replace("<", "\\u003c")
.replace(">", "\\u003e")
.replace("&", "\\u0026")
.replace("'", "\\u0027")
)
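# Hedged example for htmlsafe_json_dumps(): HTML-sensitive characters come back
# as Unicode escapes, so the payload can be embedded inside a <script> block.
def _htmlsafe_json_dumps_example() -> None:  # pragma: no cover - illustrative only
    rendered = htmlsafe_json_dumps({"msg": "<b>hi</b> & 'bye'"})
    assert "<" not in rendered and ">" not in rendered and "'" not in rendered
    assert "\\u003cb\\u003e" in rendered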
class Cycler:
"""Cycle through values by yield them one at a time, then restarting
once the end is reached. Available as ``cycler`` in templates.
Similar to ``loop.cycle``, but can be used outside loops or across
multiple loops. For example, render a list of folders and files in a
list, alternating giving them "odd" and "even" classes.
.. code-block:: html+jinja
{% set row_class = cycler("odd", "even") %}
<ul class="browser">
{% for folder in folders %}
<li class="folder {{ row_class.next() }}">{{ folder }}
{% endfor %}
{% for file in files %}
<li class="file {{ row_class.next() }}">{{ file }}
{% endfor %}
</ul>
:param items: Each positional argument will be yielded in the order
given for each cycle.
.. versionadded:: 2.1
"""
def __init__(self, *items: t.Any) -> None:
if not items:
raise RuntimeError("at least one item has to be provided")
self.items = items
self.pos = 0
def reset(self) -> None:
"""Resets the current item to the first item."""
self.pos = 0
@property
def current(self) -> t.Any:
"""Return the current item. Equivalent to the item that will be
returned next time :meth:`next` is called.
"""
return self.items[self.pos]
def next(self) -> t.Any:
"""Return the current item, then advance :attr:`current` to the
next item.
"""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
__next__ = next
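# Small usage sketch for Cycler outside a template (values are illustrative).
def _cycler_example() -> None:  # pragma: no cover - illustrative only
    row_class = Cycler("odd", "even")
    assert [row_class.next() for _ in range(3)] == ["odd", "even", "odd"]
    assert row_class.current == "even"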
class Joiner:
"""A joining helper for templates."""
def __init__(self, sep: str = ", ") -> None:
self.sep = sep
self.used = False
def __call__(self) -> str:
if not self.used:
self.used = True
return ""
return self.sep
class Namespace:
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
def __init__(*args: t.Any, **kwargs: t.Any) -> None: # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name: str) -> t.Any:
# __class__ is needed for the awaitable check in async mode
if name in {"_Namespace__attrs", "__class__"}:
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
except KeyError:
raise AttributeError(name) from None
def __setitem__(self, name: str, value: t.Any) -> None:
self.__attrs[name] = value
def __repr__(self) -> str:
return f"<Namespace {self.__attrs!r}>"
|
|
import getpass
import binascii
import logging
logger = logging.getLogger(__name__)
import sys
import json
import time
from decimal import Decimal as D
import bitcoin as bitcoinlib
import bitcoin.rpc as bitcoinlib_rpc
from bitcoin.core import CBlock
from counterpartylib.lib import util
from counterpartylib.lib import script
from counterpartylib.lib import config
from counterpartylib.lib import exceptions
from counterpartylib.lib.backend import addrindexrs
MEMPOOL_CACHE_INITIALIZED = False
PRETX_CACHE = {}
def sortkeypicker(keynames):
"""http://stackoverflow.com/a/1143719"""
negate = set()
for i, k in enumerate(keynames):
if k[:1] == '-':
keynames[i] = k[1:]
negate.add(k[1:])
def getit(adict):
composite = [adict[k] for k in keynames]
for i, (k, v) in enumerate(zip(keynames, composite)):
if k in negate:
composite[i] = -v
return composite
return getit
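# Hedged usage example for sortkeypicker() (the rows below are illustrative):
# a leading "-" makes that key sort descending while the others stay ascending.
def _sortkeypicker_example():  # pragma: no cover - illustrative only
    rows = [{'block': 2, 'fee': 10}, {'block': 1, 'fee': 30}, {'block': 1, 'fee': 20}]
    rows.sort(key=sortkeypicker(['block', '-fee']))
    # -> block 1 / fee 30, block 1 / fee 20, block 2 / fee 10
    return rows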
def BACKEND():
mdl = sys.modules['counterpartylib.lib.backend.{}'.format(config.BACKEND_NAME)]
mdl.init()
return mdl
def stop():
BACKEND().stop()
def getblockcount():
return BACKEND().getblockcount()
def getblockhash(blockcount):
return BACKEND().getblockhash(blockcount)
def getblock(block_hash):
block_hex = BACKEND().getblock(block_hash)
return CBlock.deserialize(util.unhexlify(block_hex))
def cache_pretx(txid, rawtx):
PRETX_CACHE[binascii.hexlify(txid).decode('utf8')] = binascii.hexlify(rawtx).decode('utf8')
def clear_pretx(txid):
del PRETX_CACHE[binascii.hexlify(txid).decode('utf8')]
def getrawtransaction(tx_hash, verbose=False, skip_missing=False):
if tx_hash in PRETX_CACHE:
return PRETX_CACHE[tx_hash]
else:
return BACKEND().getrawtransaction(tx_hash, verbose=verbose, skip_missing=skip_missing)
def getrawtransaction_batch(txhash_list, verbose=False, skip_missing=False):
return BACKEND().getrawtransaction_batch(txhash_list, verbose=verbose, skip_missing=skip_missing)
def sendrawtransaction(tx_hex):
return BACKEND().sendrawtransaction(tx_hex)
def getrawmempool():
return BACKEND().getrawmempool()
def getindexblocksbehind():
return BACKEND().getindexblocksbehind()
def extract_addresses(txhash_list):
return BACKEND().extract_addresses(txhash_list)
def ensure_script_pub_key_for_inputs(coins):
txhash_set = set()
for coin in coins:
if 'scriptPubKey' not in coin:
txhash_set.add(coin['txid'])
if len(txhash_set) > 0:
txs = BACKEND().getrawtransaction_batch(list(txhash_set), verbose=True, skip_missing=False)
for coin in coins:
if 'scriptPubKey' not in coin:
# get the scriptPubKey
txid = coin['txid']
for vout in txs[txid]['vout']:
if vout['n'] == coin['vout']:
coin['scriptPubKey'] = vout['scriptPubKey']['hex']
return coins
def fee_per_kb(conf_target, mode, nblocks=None):
"""
:param conf_target:
:param mode:
:return: fee_per_kb in satoshis, or None when unable to determine
"""
return BACKEND().fee_per_kb(conf_target, mode, nblocks=nblocks)
def deserialize(tx_hex):
return bitcoinlib.core.CTransaction.deserialize(binascii.unhexlify(tx_hex))
def serialize(ctx):
return bitcoinlib.core.CTransaction.serialize(ctx)
def is_valid(address):
try:
script.validate(address)
return True
except script.AddressError:
return False
def get_txhash_list(block):
return [bitcoinlib.core.b2lx(ctx.GetHash()) for ctx in block.vtx]
def get_tx_list(block):
raw_transactions = {}
tx_hash_list = []
for ctx in block.vtx:
if util.enabled('correct_segwit_txids'):
hsh = ctx.GetTxid()
else:
hsh = ctx.GetHash()
tx_hash = bitcoinlib.core.b2lx(hsh)
raw = ctx.serialize()
tx_hash_list.append(tx_hash)
raw_transactions[tx_hash] = bitcoinlib.core.b2x(raw)
return (tx_hash_list, raw_transactions)
def sort_unspent_txouts(unspent, unconfirmed=False):
# Filter out all dust amounts to avoid bloating the resultant transaction
unspent = list(filter(lambda x: x['value'] > config.DEFAULT_MULTISIG_DUST_SIZE, unspent))
# Sort by amount, using the largest UTXOs available
if config.REGTEST:
# REGTEST has a lot of coinbase inputs that can't be spent due to maturity
        # this doesn't usually happen on mainnet or testnet because most fednodes aren't mining
unspent = sorted(unspent, key=lambda x: (x['confirmations'], x['value']), reverse=True)
else:
unspent = sorted(unspent, key=lambda x: x['value'], reverse=True)
return unspent
def get_btc_supply(normalize=False):
"""returns the total supply of {} (based on what Bitcoin Core says the current block height is)""".format(config.BTC)
block_count = getblockcount()
blocks_remaining = block_count
total_supply = 0
reward = 50.0
while blocks_remaining > 0:
if blocks_remaining >= 210000:
blocks_remaining -= 210000
total_supply += 210000 * reward
reward /= 2
else:
total_supply += (blocks_remaining * reward)
blocks_remaining = 0
return total_supply if normalize else int(total_supply * config.UNIT)
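# Worked example of the halving arithmetic above (illustrative): the first
# 210,000 blocks mint 210,000 * 50 = 10,500,000 BTC, the next 210,000 mint
# 5,250,000 BTC, and so on, so the running total converges toward 21,000,000
# BTC. get_btc_supply(normalize=True) returns that partial sum in BTC for the
# current block height; the default returns it in satoshis (BTC * config.UNIT).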
class MempoolError(Exception):
pass
def get_unspent_txouts(source, unconfirmed=False, unspent_tx_hash=None):
"""returns a list of unspent outputs for a specific address
@return: A list of dicts, with each entry in the dict having the following keys:
"""
unspent = BACKEND().get_unspent_txouts(source)
# filter by unspent_tx_hash
if unspent_tx_hash is not None:
unspent = list(filter(lambda x: x['txId'] == unspent_tx_hash, unspent))
# filter unconfirmed
if not unconfirmed:
unspent = [utxo for utxo in unspent if utxo['confirmations'] > 0]
# format
for utxo in unspent:
utxo['amount'] = float(utxo['value'] / config.UNIT)
utxo['txid'] = utxo['txId']
del utxo['txId']
# do not add scriptPubKey
return unspent
def search_raw_transactions(address, unconfirmed=True):
return BACKEND().search_raw_transactions(address, unconfirmed)
class UnknownPubKeyError(Exception):
pass
def pubkeyhash_to_pubkey(pubkeyhash, provided_pubkeys=None):
# Search provided pubkeys.
if provided_pubkeys:
if type(provided_pubkeys) != list:
provided_pubkeys = [provided_pubkeys]
for pubkey in provided_pubkeys:
if pubkeyhash == script.pubkey_to_pubkeyhash(util.unhexlify(pubkey)):
return pubkey
# Search blockchain.
raw_transactions = search_raw_transactions(pubkeyhash, unconfirmed=True)
for tx_id in raw_transactions:
tx = raw_transactions[tx_id]
for vin in tx['vin']:
if 'txinwitness' in vin:
if len(vin['txinwitness']) >= 2:
# catch unhexlify errs for when txinwitness[1] isn't a witness program (eg; for P2W)
try:
pubkey = vin['txinwitness'][1]
if pubkeyhash == script.pubkey_to_p2whash(util.unhexlify(pubkey)):
return pubkey
except binascii.Error:
pass
elif 'coinbase' not in vin:
scriptsig = vin['scriptSig']
asm = scriptsig['asm'].split(' ')
if len(asm) >= 2:
# catch unhexlify errs for when asm[1] isn't a pubkey (eg; for P2SH)
try:
pubkey = asm[1]
if pubkeyhash == script.pubkey_to_pubkeyhash(util.unhexlify(pubkey)):
return pubkey
except binascii.Error:
pass
raise UnknownPubKeyError('Public key was neither provided nor published in blockchain.')
def multisig_pubkeyhashes_to_pubkeys(address, provided_pubkeys=None):
signatures_required, pubkeyhashes, signatures_possible = script.extract_array(address)
pubkeys = [pubkeyhash_to_pubkey(pubkeyhash, provided_pubkeys) for pubkeyhash in pubkeyhashes]
return script.construct_array(signatures_required, pubkeys, signatures_possible)
def init_mempool_cache():
"""prime the mempool cache, so that functioning is faster...
"""
global MEMPOOL_CACHE_INITIALIZED
logger.debug('Initializing mempool cache...')
start = time.time()
mempool_txhash_list = getrawmempool()
#with this function, don't try to load in more than BACKEND_RAW_TRANSACTIONS_CACHE_SIZE entries
num_tx = min(len(mempool_txhash_list), config.BACKEND_RAW_TRANSACTIONS_CACHE_SIZE)
mempool_tx = BACKEND().getrawtransaction_batch(mempool_txhash_list[:num_tx], skip_missing=True, verbose=True)
vin_txhash_list = []
max_remaining_num_tx = config.BACKEND_RAW_TRANSACTIONS_CACHE_SIZE - num_tx
if max_remaining_num_tx:
for txid in mempool_tx:
tx = mempool_tx[txid]
            if tx is not None:
vin_txhash_list += [vin['txid'] for vin in tx['vin']]
BACKEND().getrawtransaction_batch(vin_txhash_list[:max_remaining_num_tx], skip_missing=True, verbose=True)
MEMPOOL_CACHE_INITIALIZED = True
logger.info('Mempool cache initialized: {:.2f}s for {:,} transactions'.format(time.time() - start, num_tx + min(max_remaining_num_tx, len(vin_txhash_list))))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
|
import re
import time
import logging
from logging.handlers import TimedRotatingFileHandler
import datetime
from influxdb import InfluxDBClient
try:
import statsd
except ImportError:
pass
logger = logging.getLogger('graphite_influxdb')
try:
from graphite_api.intervals import Interval, IntervalSet
from graphite_api.node import LeafNode, BranchNode
except ImportError:
try:
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
except ImportError:
        raise SystemExit(1, "You have neither graphite_api nor "
                            "the graphite webapp in your pythonpath")
# Tell influxdb to return time as seconds from epoch
_INFLUXDB_CLIENT_PARAMS = {'epoch' : 's'}
class NullStatsd():
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
pass
def timer(self, key, val=None):
return self
def timing(self, key, val):
pass
def start(self):
pass
def stop(self):
pass
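# Illustrative only: NullStatsd is a no-op stand-in that mirrors the small subset
# of the statsd client API used below, so the timing calls work whether or not a
# real statsd client is configured, e.g.:
#   with statsd_client.timer('some.metric.key'):
#       ...  # timed section
#   t = statsd_client.timer('another.key'); t.start(); t.stop()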
def normalize_config(config=None):
ret = {}
if config is not None:
cfg = config.get('influxdb', {})
ret['host'] = cfg.get('host', 'localhost')
ret['port'] = cfg.get('port', 8086)
ret['user'] = cfg.get('user', 'graphite')
ret['passw'] = cfg.get('pass', 'graphite')
ret['db'] = cfg.get('db', 'graphite')
ssl = cfg.get('ssl', False)
        ret['ssl'] = ssl in (True, 'true')
ret['schema'] = cfg.get('schema', [])
ret['log_file'] = cfg.get('log_file', None)
ret['log_level'] = cfg.get('log_level', 'info')
cfg = config.get('es', {})
ret['es_enabled'] = cfg.get('enabled', False)
ret['es_index'] = cfg.get('index', 'graphite_metrics2')
ret['es_hosts'] = cfg.get('hosts', ['localhost:9200'])
ret['es_field'] = cfg.get('field', '_id')
if config.get('statsd', None):
ret['statsd'] = config.get('statsd')
else:
from django.conf import settings
ret['host'] = getattr(settings, 'INFLUXDB_HOST', 'localhost')
ret['port'] = getattr(settings, 'INFLUXDB_PORT', 8086)
ret['user'] = getattr(settings, 'INFLUXDB_USER', 'graphite')
ret['passw'] = getattr(settings, 'INFLUXDB_PASS', 'graphite')
ret['db'] = getattr(settings, 'INFLUXDB_DB', 'graphite')
ssl = getattr(settings, 'INFLUXDB_SSL', False)
        ret['ssl'] = ssl in (True, 'true')
ret['schema'] = getattr(settings, 'INFLUXDB_SCHEMA', [])
ret['log_file'] = getattr(
settings, 'INFLUXDB_LOG_FILE', None)
# Default log level is 'info'
ret['log_level'] = getattr(
settings, 'INFLUXDB_LOG_LEVEL', 'info')
ret['es_enabled'] = getattr(settings, 'ES_ENABLED', False)
ret['es_index'] = getattr(settings, 'ES_INDEX', 'graphite_metrics2')
ret['es_hosts'] = getattr(settings, 'ES_HOSTS', ['localhost:9200'])
ret['es_field'] = getattr(settings, 'ES_FIELD', '_id')
return ret
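# Illustrative only (hypothetical values): a graphite-api config fragment such as
#   {'influxdb': {'host': 'influx.example.com', 'port': 8086, 'db': 'graphite',
#                 'schema': [('^collectd\.', 10)]},
#    'statsd': {'host': 'statsd.example.com'}}
# normalizes to a flat dict with keys like 'host', 'port', 'db', 'schema',
# 'statsd', 'es_enabled', etc.; with no config at all, the Django settings
# branch above is used instead.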
def _make_graphite_api_points_list(influxdb_data):
"""Make graphite-api data points dictionary from Influxdb ResultSet data"""
_data = {}
for key in influxdb_data.keys():
_data[key[0]] = [(datetime.datetime.fromtimestamp(float(d['time'])),
d['value']) for d in influxdb_data.get_points(key[0])]
return _data
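# Illustrative only: given an InfluxDB ResultSet holding points for the series
# 'foo.bar', the helper above returns something like
#   {'foo.bar': [(datetime.datetime(...), 42.0), (datetime.datetime(...), 43.5)]}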
class InfluxdbReader(object):
__slots__ = ('client', 'path', 'step', 'statsd_client')
def __init__(self, client, path, step, statsd_client):
self.client = client
self.path = path
self.step = step
self.statsd_client = statsd_client
def fetch(self, start_time, end_time):
# in graphite,
# from is exclusive (from=foo returns data at ts=foo+1 and higher)
# until is inclusive (until=bar returns data at ts=bar and lower)
# influx doesn't support <= and >= yet, hence the add.
logger.debug("fetch() path=%s start_time=%s, end_time=%s, step=%d", self.path, start_time, end_time, self.step)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_influxdb.target_type_is_gauge.unit_is_ms.what_is_query_individual_duration'):
_query = 'select mean(value) as value from "%s" where (time > %ds and time <= %ds) GROUP BY time(%ss)' % (
self.path, start_time, end_time, self.step)
logger.debug("fetch() path=%s querying influxdb query: '%s'", self.path, _query)
data = self.client.query(_query, params=_INFLUXDB_CLIENT_PARAMS)
logger.debug("fetch() path=%s returned data: %s", self.path, data)
try:
data = _make_graphite_api_points_list(data)
except Exception:
logger.debug("fetch() path=%s COULDN'T READ POINTS. SETTING TO EMPTY LIST", self.path)
                data = {self.path: []}
        time_info = start_time, end_time, self.step
        return time_info, [v[1] for v in data.get(self.path, [])]
def get_intervals(self):
now = int(time.time())
return IntervalSet([Interval(1, now)])
class InfluxLeafNode(LeafNode):
__fetch_multi__ = 'influxdb'
class InfluxdbFinder(object):
__fetch_multi__ = 'influxdb'
__slots__ = ('client', 'es', 'schemas', 'config', 'statsd_client')
def __init__(self, config=None):
# Shouldn't be trying imports in __init__.
# It turns what should be a load error into a runtime error
config = normalize_config(config)
self.config = config
self.client = InfluxDBClient(config['host'], config['port'], config['user'], config['passw'], config['db'], config['ssl'])
self.schemas = [(re.compile(patt), step) for (patt, step) in config['schema']]
try:
self.statsd_client = statsd.StatsClient(config['statsd'].get('host'),
config['statsd'].get('port', 8125)) \
if 'statsd' in config and config['statsd'].get('host') else NullStatsd()
except NameError:
logger.warning("Statsd client configuration present but 'statsd' module"
"not installed - ignoring statsd configuration..")
self.statsd_client = NullStatsd()
self._setup_logger(config['log_level'], config['log_file'])
self.es = None
if config['es_enabled']:
try:
from elasticsearch import Elasticsearch
except ImportError:
logger.warning("Elasticsearch configuration present but 'elasticsearch'"
"module not installed - ignoring elasticsearch configuration..")
else:
self.es = Elasticsearch(config['es_hosts'])
def _setup_logger(self, level, log_file):
"""Setup log level and log file if set"""
if logger.handlers:
return
level = getattr(logging, level.upper())
logger.setLevel(level)
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
return
try:
handler = TimedRotatingFileHandler(log_file)
except IOError:
logger.error("Could not write to %s, falling back to stdout",
log_file)
else:
logger.addHandler(handler)
handler.setFormatter(formatter)
def assure_series(self, query):
key_series = "%s_series" % query.pattern
done = False
if self.es:
# note: ES always treats a regex as anchored at start and end
regex = self.compile_regex('{0}.*', query)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_elasticsearch.target_type_is_gauge.unit_is_ms.action_is_get_series'):
logger.debug("assure_series() Calling ES with regexp - %s", regex.pattern)
try:
res = self.es.search(index=self.config['es_index'],
size=10000,
body={
"query": {
"regexp": {
self.config['es_field']: regex.pattern,
},
},
"fields": [self.config['es_field']]
}
)
if res['_shards']['successful'] > 0:
# pprint(res['hits']['total'])
series = [hit['fields'][self.config['es_field']] for hit in res['hits']['hits']]
done = True
else:
logger.error("assure_series() Calling ES failed for %s: no successful shards", regex.pattern)
except Exception as e:
logger.error("assure_series() Calling ES failed for %s: %s", regex.pattern, e)
# if no ES configured, or ES failed, try influxdb.
if not done:
# regexes in influxdb are not assumed to be anchored, so anchor them explicitly
regex = self.compile_regex('^{0}', query)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_influxdb.target_type_is_gauge.unit_is_ms.action_is_get_series'):
_query = "show series from /%s/" % regex.pattern
logger.debug("assure_series() Calling influxdb with query - %s", _query)
ret = self.client.query(_query, params=_INFLUXDB_CLIENT_PARAMS)
# as long as influxdb doesn't have good safeguards against
# series with bad data in the metric names, we must filter out
# like so:
series = [key_name for [key_name] in ret.raw['series'][0]['values']]
return series
def compile_regex(self, fmt, query):
"""Turn glob (graphite) queries into compiled regex
* becomes .*
. becomes \.
fmt argument is so that caller can control anchoring (must contain exactly 1 {0} !"""
return re.compile(fmt.format(
query.pattern.replace('.', '\.').replace('*', '[^\.]*').replace(
'{', '(').replace(',', '|').replace('}', ')')
))
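    # Illustrative only: a graphite glob such as 'servers.*.cpu.{user,system}'
    # compiled with fmt='^{0}$' becomes the regex
    #   ^servers\.[^\.]*\.cpu\.(user|system)$
    # (the '^{0}', '^{0}$' and '{0}.*' formats used elsewhere only differ in anchoring).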
def get_leaves(self, query):
key_leaves = "%s_leaves" % query.pattern
series = self.assure_series(query)
regex = self.compile_regex('^{0}$', query)
logger.debug("get_leaves() key %s", key_leaves)
timer = self.statsd_client.timer('service_is_graphite-api.action_is_find_leaves.target_type_is_gauge.unit_is_ms')
now = datetime.datetime.now()
timer.start()
# return every matching series and its
# resolution (based on first pattern match in schema, fallback to 60s)
leaves = [(name, next((res for (patt, res) in self.schemas if patt.match(name)), 60))
for name in series if regex.match(name)
]
timer.stop()
end = datetime.datetime.now()
dt = end - now
logger.debug("get_leaves() key %s Finished find_leaves in %s.%ss",
key_leaves,
dt.seconds,
dt.microseconds)
return leaves
def get_branches(self, query):
seen_branches = set()
key_branches = "%s_branches" % query.pattern
        # assure_series() can be expensive: it queries ES or influxdb on every call
series = self.assure_series(query)
regex = self.compile_regex('^{0}$', query)
logger.debug("get_branches() %s", key_branches)
timer = self.statsd_client.timer('service_is_graphite-api.action_is_find_branches.target_type_is_gauge.unit_is_ms')
start_time = datetime.datetime.now()
timer.start()
branches = []
for name in series:
while '.' in name:
name = name.rsplit('.', 1)[0]
if name not in seen_branches:
seen_branches.add(name)
if regex.match(name) is not None:
logger.debug("get_branches() %s found branch name: %s", key_branches, name)
branches.append(name)
timer.stop()
end_time = datetime.datetime.now()
dt = end_time - start_time
logger.debug("get_branches() %s Finished find_branches in %s.%ss",
key_branches,
dt.seconds, dt.microseconds)
return branches
def find_nodes(self, query):
logger.debug("find_nodes() query %s", query)
# TODO: once we can query influx better for retention periods, honor the start/end time in the FindQuery object
with self.statsd_client.timer('service_is_graphite-api.action_is_yield_nodes.target_type_is_gauge.unit_is_ms.what_is_query_duration'):
for (name, res) in self.get_leaves(query):
yield InfluxLeafNode(name, InfluxdbReader(
self.client, name, res, self.statsd_client))
for name in self.get_branches(query):
logger.debug("Yielding branch %s" % (name,))
yield BranchNode(name)
def fetch_multi(self, nodes, start_time, end_time):
series = ', '.join(['"%s"' % node.path for node in nodes])
# use the step of the node that is the most coarse
# not sure if there's a better way? can we combine series
# with different steps (and use the optimal step for each?)
# probably not
step = max([node.reader.step for node in nodes])
query = 'select mean(value) as value from %s where (time > %ds and time <= %ds) GROUP BY time(%ss)' % (
series, start_time, end_time, step)
logger.debug('fetch_multi() query: %s', query)
logger.debug('fetch_multi() - start_time: %s - end_time: %s, step %s',
datetime.datetime.fromtimestamp(float(start_time)), datetime.datetime.fromtimestamp(float(end_time)), step)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_influxdb.target_type_is_gauge.unit_is_ms.action_is_select_datapoints'):
logger.debug("Calling influxdb multi fetch with query - %s", query)
data = self.client.query(query, params=_INFLUXDB_CLIENT_PARAMS)
logger.debug('fetch_multi() - Retrieved %d result set(s)', len(data))
data = _make_graphite_api_points_list(data)
# some series we requested might not be in the resultset.
# this is because influx doesn't include series that had no values
# this is a behavior that some people actually appreciate when graphing, but graphite doesn't do this (yet),
# and we want to look the same, so we must add those back in.
# a better reason though, is because for advanced alerting cases like bosun, you want all entries even if they have no data, so you can properly
# compare, join, or do logic with the targets returned for requests for the same data but from different time ranges, you want them to all
# include the same keys.
query_keys = set([node.path for node in nodes])
for key in query_keys:
data.setdefault(key, [])
time_info = start_time, end_time, step
for key in data:
data[key] = [v[1] for v in data[key]]
return time_info, data
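    # Illustrative only (hypothetical paths/values): fetch_multi() returns
    #   ((start_time, end_time, step), {'servers.a.cpu': [1.0, 2.0], 'servers.b.cpu': []})
    # i.e. every requested node path appears as a key, even when influxdb
    # returned no datapoints for it.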
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager
class SessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.Variable(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.Variable([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's a checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def _test_recovered_variable(self,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
# Create a new Graph and SessionManager and recover from a checkpoint.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
with session_lib.Session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
self._test_recovered_variable(
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
checkpoint_dir))
# Cannot set both checkpoint_dir and checkpoint_filename_with_path.
with self.assertRaises(ValueError):
self._test_recovered_variable(
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
checkpoint_dir))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
with self.assertRaisesRegexp(ValueError,
"If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "):
session_manager.SessionManager(
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=None)
def testRecoverSessionWithReadyForLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
"recover_session_ready_for_local_init")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
# We use ready_for_local_init_op=tf.report_uninitialized_variables(),
# which causes recover_session to not run local_init_op, and to return
# initialized=False
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_to_ready_local")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
# This test checks for backwards compatibility.
# In particular, we continue to ensure that recover_session will execute
# local_init_op exactly once, regardless of whether the session was
# successfully recovered.
with ops.Graph().as_default():
w = variables.Variable(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
# Try to recover session from None
sess, initialized = sm2.recover_session(
"", saver=None, checkpoint_dir=None)
      # Succeeds because recover_session still runs local_init_op.
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
def testRecoverSessionFailsStillRunsLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_stil_run")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
w = variables.Variable(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=False)
self.assertFalse(initialized)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(w))
def testWaitForSessionLocalInit(self):
server = server_lib.Server.create_local_server()
with ops.Graph().as_default() as graph:
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
# Initialize v but not w
s = session_lib.Session(server.target, graph=graph)
s.run(v.initializer)
sess = sm.wait_for_session(server.target, max_wait_secs=3)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
with ops.Graph().as_default() as graph:
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
with self.assertRaises(errors_impl.DeadlineExceededError):
# Time-out because w fails to be initialized,
# because of overly restrictive ready_for_local_init_op
sm.wait_for_session("", max_wait_secs=3)
def testWaitForSessionInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default() as graph:
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(errors_impl.FailedPreconditionError,
"Attempting to use uninitialized value v"):
sm.wait_for_session("", max_wait_secs=3)
def testPrepareSessionWithReadyForLocalInitOp(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
sess = sm2.prepare_session("", init_op=v.initializer)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
self.assertEquals(1, sess.run(w))
def testPrepareSessionDidNotInitLocalVariable(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready"):
sm2.prepare_session("", init_op=v.initializer)
def testPrepareSessionWithReadyNotReadyForLocal(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
with self.assertRaisesRegexp(
RuntimeError,
"Init operations did not make model ready for local_init"):
sm2.prepare_session("", init_op=None)
def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
w = variables.Variable(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(errors_impl.FailedPreconditionError,
"Attempting to use uninitialized value v"):
sm2.prepare_session("", init_op=None)
class ObsoleteSessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.Variable(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.Variable([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's a checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
if __name__ == "__main__":
test.main()
|