content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
async def health() -> Dict[str, str]:
"""Health check function
:return: Health check dict
:rtype: Dict[str, str]
"""
health_response = schemas.Health(name=settings.PROJECT_NAME,
api_version=__version__)
return health_response.dict()
| 5,347,700 |
def controls(pressed_list):
""" Paddles control function"""
global plr_y, enm_y
if pressed_list[pg.K_w] and plr_y >= 0:
plr_y -= 3
if pressed_list[pg.K_s] and plr_y <= window_h - 20:
plr_y += 3
if pressed_list[pg.K_UP] and enm_y >= 0:
enm_y -= 3
if pressed_list[pg.K_DOWN] and enm_y <= window_h - 20:
enm_y += 3
| 5,347,701 |
def detail(video_id):
""" return value is
[
{
            'video_name' : s
},
{
'person_id': n,
'person_info_list' : [
{
'frame' : n
'millisec' : n
'age' : n
'gender' : s
'img_person' : s
'top_color' : n
'bottom_color' : n
},
{
...
}
]
},
{
'person_id' : n,
...
},
...
]
"""
video = VideoList.query.get_or_404(video_id)
tableName = videoNameToTable(video.video_name)
VideoTable = getVideoTable(tableName)
returnJson = list()
returnJson.append({'video_name' : tableName + '.mp4' })
people = db.session.query(VideoTable.person_id.distinct()).all()
for person in people:
personDict = dict()
person_id = person[0]
personDict['person_id'] = person_id
personDict['person_info_list'] = list()
personInfoList = VideoTable.query.filter(VideoTable.person_id == person_id).all()
for personInfo in personInfoList:
# change 'personInfo.img_person' from abs path to relative path
index = personInfo.img_person.find('images')
img_person = personInfo.img_person[index + 7:]
personDict['person_info_list'].append(
{
'frame' : personInfo.frame,
'millisec' : personInfo.millisec,
'age' : personInfo.age,
'gender' : personInfo.gender,
'img_person' : img_person,
'top_color' : personInfo.top_color,
'bottom_color' : personInfo.bottom_color
}
)
returnJson.append(personDict)
return jsonify(returnJson), 200
| 5,347,702 |
def list_terminologies():
""" Get the list of available Amazon Translate Terminologies for this region
Returns:
        This is a proxy for boto3 list_terminologies and returns the output from that SDK method.
See `the boto3 documentation for details <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/translate.html#Translate.Client.list_terminologies>`_
Raises:
See the boto3 documentation for details
500: Internal server error
"""
# This function returns a list of saved terminologies
print('list_terminologies request: '+app.current_request.raw_body.decode())
translate_client = boto3.client('translate', region_name=os.environ['AWS_REGION'])
response = translate_client.list_terminologies(MaxResults=100)
terminologies = response['TerminologyPropertiesList']
while ('NextToken' in response):
response = translate_client.list_terminologies(MaxResults=100, NextToken=response['NextToken'])
terminologies = terminologies + response['TerminologyPropertiesList']
# Convert time field to a format that is JSON serializable
for item in terminologies:
item['CreatedAt'] = item['CreatedAt'].isoformat()
item['LastUpdatedAt'] = item['LastUpdatedAt'].isoformat()
    # Include every page of results, not just the final paginated response
    response['TerminologyPropertiesList'] = terminologies
    return response
| 5,347,703 |
def RunLinters(prefix, name, data, settings=None):
"""Run linters starting with |prefix| against |data|."""
ret = []
if settings is None:
settings = ParseOptions([])
ret += settings.errors
linters = [x for x in FindLinters(prefix) if x not in settings.skip]
for linter in linters:
functor = globals().get(linter)
for result in functor(data):
ret.append(LintResult(linter, name, result, logging.ERROR))
return ret
| 5,347,704 |
def watchdog_mock():
"""Mock watchdog module."""
with patch.dict('sys.modules', {
'watchdog': MagicMock(),
'watchdog.observers': MagicMock(),
'watchdog.events': MagicMock(),
}):
yield
| 5,347,705 |
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Incident fields:
# mock_incident_field
# Action fields:
# mock_activity_field_one
# Function inputs:
# mock_field_one
# mock_field_two
# DataTables:
# mock_data_table
# Message Destinations:
# fn_mock_integration
# Functions:
# mock_function_one
# mock_function_two
# Workflows:
# example_mock_workflow_one
# Rules:
# Mock Auto Rule
# Mock Manual Rule
yield ImportDefinition(u"""
eyJsb2NhbGUiOiBudWxsLCAid29ya2Zsb3dzIjogW3siZGVzY3JpcHRpb24iOiAiQW4gZXhhbXBs
ZSB3b3JrZmxvdyBmb3IgbW9jayB1bml0IHRlc3RzIiwgIndvcmtmbG93X2lkIjogNywgInRhZ3Mi
OiBbXSwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImV4cG9ydF9rZXkiOiAiZXhhbXBsZV9t
b2NrX3dvcmtmbG93X29uZSIsICJ1dWlkIjogImZhZDMxNzM5LTA2MzQtNDJjOS1hMDBhLTcwZTY2
MTA3NDA4YiIsICJhY3Rpb25zIjogW10sICJjb250ZW50IjogeyJ4bWwiOiAiPD94bWwgdmVyc2lv
bj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDov
L3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0
dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0
dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRw
Oi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0
dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5v
cmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hN
TFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5v
cmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZXhhbXBsZV9tb2NrX3dvcmtmbG93X29uZVwiIGlzRXhl
Y3V0YWJsZT1cInRydWVcIiBuYW1lPVwiRXhhbXBsZTogTW9jayBXb3JrZmxvdyBPbmVcIj48ZG9j
dW1lbnRhdGlvbj5BbiBleGFtcGxlIHdvcmtmbG93IGZvciBtb2NrIHVuaXQgdGVzdHM8L2RvY3Vt
ZW50YXRpb24+PHN0YXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+
U2VxdWVuY2VGbG93XzAxMmxjazA8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sg
aWQ9XCJTZXJ2aWNlVGFza18xOGE5M3hlXCIgbmFtZT1cIm1vY2tfZnVuY3Rpb25fb25lXCIgcmVz
aWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1
bmN0aW9uIHV1aWQ9XCI5NmNjNjQ1YS1iOTc3LTRkYzktOWQxZC1jYWRiNDA4MGJiN2FcIj57XCJp
bnB1dHNcIjp7XCIxNjkzMzg1Mi03MTJjLTRlM2ItYjc4NS03Yzk5NTE0ZTk1NTFcIjp7XCJpbnB1
dF90eXBlXCI6XCJzdGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVl
XCI6W10sXCJ0ZXh0X3ZhbHVlXCI6XCJtb2NrIGlucHV0IHZhbHVlIG9uZVwifX0sXCJhMTYxMjIz
Yy05YzYwLTQ4MGYtOGIyMC01OTc0M2U2NGRkNTNcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNc
IixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJudW1iZXJfdmFs
dWVcIjoxMjM0NX19fX08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxp
bmNvbWluZz5TZXF1ZW5jZUZsb3dfMDEybGNrMDwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNl
Rmxvd18wNWw2MzZpPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxlbmRFdmVudCBpZD1cIkVuZEV2
ZW50XzByZmhtZXNcIj48aW5jb21pbmc+U2VxdWVuY2VGbG93XzF0ZWo0bm08L2luY29taW5nPjwv
ZW5kRXZlbnQ+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18wMTJsY2swXCIgc291cmNl
UmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMThhOTN4
ZVwiLz48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzA1bDYzNmlcIiBzb3VyY2VSZWY9
XCJTZXJ2aWNlVGFza18xOGE5M3hlXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMWVhdHdtcVwi
Lz48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18xZWF0d21xXCIgbmFtZT1cIm1vY2tfZnVu
Y3Rpb25fdHdvXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50
cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCJkODRhMzliOS00NzI2LTQzOTAtOTI5NC0xNTI1
MTc0ZDQxY2NcIj57XCJpbnB1dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlu
Y2lkZW50LmFkZE5vdGUoXFxcImEgbW9jayBub3RlIGFkZGVkIGluIHRoZSBwb3N0IHByb2Nlc3Mg
c2NyaXB0XFxcIilcIixcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiXFxuaW5wdXRzLm1vY2tf
ZmllbGRfb25lID0gXFxcIm1vY2sgdmFsdWUgZm9yIGZpZWxkIG9uZVxcXCJcXG5cXG5pbnB1dHMu
bW9ja19maWVsZF90d28gPSAxMjM0NVwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9u
RWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18wNWw2MzZpPC9pbmNvbWluZz48b3V0Z29p
bmc+U2VxdWVuY2VGbG93XzF0ZWo0bm08L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNl
RmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xdGVqNG5tXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tf
MWVhdHdtcVwiIHRhcmdldFJlZj1cIkVuZEV2ZW50XzByZmhtZXNcIi8+PC9wcm9jZXNzPjxicG1u
ZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBt
bkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hh
cGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1
YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjM5
NlwiIHk9XCI2OVwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBc
IiB3aWR0aD1cIjkwXCIgeD1cIjM5MVwiIHk9XCIxMDRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwv
YnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VU
YXNrXzE4YTkzeGVcIiBpZD1cIlNlcnZpY2VUYXNrXzE4YTkzeGVfZGlcIj48b21nZGM6Qm91bmRz
IGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiNTUzXCIgeT1cIjQ3XCIvPjwvYnBtbmRp
OkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzByZmht
ZXNcIiBpZD1cIkVuZEV2ZW50XzByZmhtZXNfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2
XCIgd2lkdGg9XCIzNlwiIHg9XCIxMDMwXCIgeT1cIjY5XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxv
bWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiMTA0OFwiIHk9XCIxMDhc
Ii8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdl
IGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzAxMmxjazBcIiBpZD1cIlNlcXVlbmNlRmxvd18w
MTJsY2swX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI0MzJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBv
aW50XCIgeT1cIjg3XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNTUzXCIgeHNpOnR5cGU9XCJvbWdk
YzpQb2ludFwiIHk9XCI4N1wiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdo
dD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjQ5Mi41XCIgeT1cIjY1XCIvPjwvYnBtbmRpOkJQTU5M
YWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2Vx
dWVuY2VGbG93XzA1bDYzNmlcIiBpZD1cIlNlcXVlbmNlRmxvd18wNWw2MzZpX2RpXCI+PG9tZ2Rp
OndheXBvaW50IHg9XCI2NTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjg3XCIvPjxv
bWdkaTp3YXlwb2ludCB4PVwiNzgxXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCI4N1wi
Lz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIw
XCIgeD1cIjcxN1wiIHk9XCI2NS41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1O
RWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzFlYXR3bXFc
IiBpZD1cIlNlcnZpY2VUYXNrXzFlYXR3bXFfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgw
XCIgd2lkdGg9XCIxMDBcIiB4PVwiNzgxXCIgeT1cIjQ3XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48
YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzF0ZWo0bm1cIiBpZD1c
IlNlcXVlbmNlRmxvd18xdGVqNG5tX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCI4ODFcIiB4c2k6
dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjg3XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTAzMFwi
IHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiODdcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9t
Z2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI5NTUuNVwiIHk9XCI2NVwi
Lz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBNTlBsYW5l
PjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgIndvcmtmbG93X2lkIjogImV4
YW1wbGVfbW9ja193b3JrZmxvd19vbmUiLCAidmVyc2lvbiI6IDN9LCAiY3JlYXRvcl9pZCI6ICJh
ZG1pbkBleGFtcGxlLmNvbSIsICJsYXN0X21vZGlmaWVkX2J5IjogImFkbWluQGV4YW1wbGUuY29t
IiwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NTgwODMzNjcwNzEsICJjb250ZW50X3ZlcnNpb24i
OiAzLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9tb2NrX3dvcmtmbG93X29uZSIsICJu
YW1lIjogIkV4YW1wbGU6IE1vY2sgV29ya2Zsb3cgT25lIn1dLCAiYWN0aW9ucyI6IFt7InRpbWVv
dXRfc2Vjb25kcyI6IDg2NDAwLCAib2JqZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAidHlwZSI6IDAs
ICJuYW1lIjogIk1vY2sgQXV0byBSdWxlIiwgInRhZ3MiOiBbXSwgInZpZXdfaXRlbXMiOiBbXSwg
ImVuYWJsZWQiOiB0cnVlLCAid29ya2Zsb3dzIjogWyJleGFtcGxlX21vY2tfd29ya2Zsb3dfb25l
Il0sICJsb2dpY190eXBlIjogImFsbCIsICJleHBvcnRfa2V5IjogIk1vY2sgQXV0byBSdWxlIiwg
InV1aWQiOiAiNWI2MGE5ODMtOWM1YS00MWY0LWI2OGItYWU2NGYwMzhiNzEyIiwgImF1dG9tYXRp
b25zIjogW10sICJjb25kaXRpb25zIjogW3sidHlwZSI6IG51bGwsICJldmFsdWF0aW9uX2lkIjog
bnVsbCwgImZpZWxkX25hbWUiOiBudWxsLCAibWV0aG9kIjogIm9iamVjdF9hZGRlZCIsICJ2YWx1
ZSI6IG51bGx9XSwgImlkIjogMjEsICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdfSwgeyJ0aW1l
b3V0X3NlY29uZHMiOiA4NjQwMCwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgInR5cGUiOiAx
LCAibmFtZSI6ICJNb2NrIE1hbnVhbCBSdWxlIiwgInRhZ3MiOiBbXSwgInZpZXdfaXRlbXMiOiBb
eyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIsICJzaG93
X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6
ICJmMWQ1NDE5Zi1kYjg5LTRiNmMtYTdmMC0xMDU1OWE2ZTM2YzIiLCAic3RlcF9sYWJlbCI6IG51
bGx9XSwgImVuYWJsZWQiOiB0cnVlLCAid29ya2Zsb3dzIjogWyJleGFtcGxlX21vY2tfd29ya2Zs
b3dfb25lIl0sICJsb2dpY190eXBlIjogImFsbCIsICJleHBvcnRfa2V5IjogIk1vY2sgTWFudWFs
IFJ1bGUiLCAidXVpZCI6ICI2NGFhMzhjMS02ZmNlLTQzYjktYjc4Zi0zMjNhMTU2OGYyNDQiLCAi
YXV0b21hdGlvbnMiOiBbXSwgImNvbmRpdGlvbnMiOiBbXSwgImlkIjogMjAsICJtZXNzYWdlX2Rl
c3RpbmF0aW9ucyI6IFtdfV0sICJsYXlvdXRzIjogW10sICJleHBvcnRfZm9ybWF0X3ZlcnNpb24i
OiAyLCAiaWQiOiAxLCAiaW5kdXN0cmllcyI6IG51bGwsICJmdW5jdGlvbnMiOiBbeyJkaXNwbGF5
X25hbWUiOiAibW9ja19mdW5jdGlvbl9vbmUiLCAiZGVzY3JpcHRpb24iOiB7ImNvbnRlbnQiOiAi
bW9ja19kZXNjcmlwdGlvbiIsICJmb3JtYXQiOiAidGV4dCJ9LCAiY3JlYXRvciI6IHsidHlwZSI6
ICJ1c2VyIiwgImRpc3BsYXlfbmFtZSI6ICJBZG1pbiBVc2VyIiwgImlkIjogNzEsICJuYW1lIjog
ImFkbWluQGV4YW1wbGUuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6IG51bGwsICJm
aWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAiZWxl
bWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiMTY5MzM4NTItNzEyYy00ZTNiLWI3ODUt
N2M5OTUxNGU5NTUxIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjogbnVsbCwgImZp
ZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVt
ZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJhMTYxMjIzYy05YzYwLTQ4MGYtOGIyMC01
OTc0M2U2NGRkNTMiLCAic3RlcF9sYWJlbCI6IG51bGx9XSwgInRhZ3MiOiBbXSwgImV4cG9ydF9r
ZXkiOiAibW9ja19mdW5jdGlvbl9vbmUiLCAidXVpZCI6ICI5NmNjNjQ1YS1iOTc3LTRkYzktOWQx
ZC1jYWRiNDA4MGJiN2EiLCAibGFzdF9tb2RpZmllZF9ieSI6IHsidHlwZSI6ICJ1c2VyIiwgImRp
c3BsYXlfbmFtZSI6ICJBZG1pbiBVc2VyIiwgImlkIjogNzEsICJuYW1lIjogImFkbWluQGV4YW1w
bGUuY29tIn0sICJ2ZXJzaW9uIjogMSwgIndvcmtmbG93cyI6IFt7InByb2dyYW1tYXRpY19uYW1l
IjogImV4YW1wbGVfbW9ja193b3JrZmxvd19vbmUiLCAidGFncyI6IFtdLCAib2JqZWN0X3R5cGUi
OiAiaW5jaWRlbnQiLCAidXVpZCI6IG51bGwsICJhY3Rpb25zIjogW10sICJuYW1lIjogIkV4YW1w
bGU6IE1vY2sgV29ya2Zsb3cgT25lIiwgIndvcmtmbG93X2lkIjogNywgImRlc2NyaXB0aW9uIjog
bnVsbH1dLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1ODA4MzE1NzE3OSwgImRlc3RpbmF0aW9u
X2hhbmRsZSI6ICJmbl9tb2NrX2ludGVncmF0aW9uIiwgImlkIjogNDAsICJuYW1lIjogIm1vY2tf
ZnVuY3Rpb25fb25lIn0sIHsiZGlzcGxheV9uYW1lIjogIm1vY2tfZnVuY3Rpb25fdHdvIiwgImRl
c2NyaXB0aW9uIjogeyJjb250ZW50IjogIm1vY2sgZGVzY3JpcHRpb24gdHdvIiwgImZvcm1hdCI6
ICJ0ZXh0In0sICJjcmVhdG9yIjogeyJ0eXBlIjogInVzZXIiLCAiZGlzcGxheV9uYW1lIjogIkFk
bWluIFVzZXIiLCAiaWQiOiA3MSwgIm5hbWUiOiAiYWRtaW5AZXhhbXBsZS5jb20ifSwgInZpZXdf
aXRlbXMiOiBbeyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJz
aG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVu
dCI6ICIxNjkzMzg1Mi03MTJjLTRlM2ItYjc4NS03Yzk5NTE0ZTk1NTEiLCAic3RlcF9sYWJlbCI6
IG51bGx9LCB7InNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNo
b3dfbGlua19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50
IjogImExNjEyMjNjLTljNjAtNDgwZi04YjIwLTU5NzQzZTY0ZGQ1MyIsICJzdGVwX2xhYmVsIjog
bnVsbH1dLCAidGFncyI6IFtdLCAiZXhwb3J0X2tleSI6ICJtb2NrX2Z1bmN0aW9uX3R3byIsICJ1
dWlkIjogImQ4NGEzOWI5LTQ3MjYtNDM5MC05Mjk0LTE1MjUxNzRkNDFjYyIsICJsYXN0X21vZGlm
aWVkX2J5IjogeyJ0eXBlIjogInVzZXIiLCAiZGlzcGxheV9uYW1lIjogIkFkbWluIFVzZXIiLCAi
aWQiOiA3MSwgIm5hbWUiOiAiYWRtaW5AZXhhbXBsZS5jb20ifSwgInZlcnNpb24iOiAxLCAid29y
a2Zsb3dzIjogW3sicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9tb2NrX3dvcmtmbG93X29u
ZSIsICJ0YWdzIjogW10sICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJ1dWlkIjogbnVsbCwg
ImFjdGlvbnMiOiBbXSwgIm5hbWUiOiAiRXhhbXBsZTogTW9jayBXb3JrZmxvdyBPbmUiLCAid29y
a2Zsb3dfaWQiOiA3LCAiZGVzY3JpcHRpb24iOiBudWxsfV0sICJsYXN0X21vZGlmaWVkX3RpbWUi
OiAxNTU4MDgzMTg4NTQ5LCAiZGVzdGluYXRpb25faGFuZGxlIjogImZuX21vY2tfaW50ZWdyYXRp
b24iLCAiaWQiOiA0MSwgIm5hbWUiOiAibW9ja19mdW5jdGlvbl90d28ifV0sICJhY3Rpb25fb3Jk
ZXIiOiBbXSwgImdlb3MiOiBudWxsLCAidGFza19vcmRlciI6IFtdLCAidHlwZXMiOiBbeyJwcm9w
ZXJ0aWVzIjogeyJmb3Jfd2hvIjogW10sICJjYW5fZGVzdHJveSI6IGZhbHNlLCAiY2FuX2NyZWF0
ZSI6IGZhbHNlfSwgImZvcl93b3JrZmxvd3MiOiBmYWxzZSwgImRpc3BsYXlfbmFtZSI6ICJNb2Nr
IERhdGEgVGFibGUiLCAidXVpZCI6ICIyYzg0ZDM5YS04YzJlLTRjNmUtYTlmMC1iNjI5NGJkNGMy
Y2IiLCAidGFncyI6IFtdLCAiZmllbGRzIjogeyJtb2NrX2NvbF8yIjogeyJvcGVyYXRpb25zIjog
W10sICJ0eXBlX2lkIjogMTAwMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJtb2Nr
IGNvbCAyIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFi
bGUiOiB0cnVlLCAiaWQiOiAyMzQsICJyZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiNWNiMjgz
OWItMzhjZi00MTk0LThkNTMtYWQ5YzE5OWY0MjY2IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRf
dHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAiIiwgIndpZHRoIjogMzMxLCAiaW50ZXJuYWwiOiBm
YWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJ0YWdzIjogW10sICJh
bGxvd19kZWZhdWx0X3ZhbHVlIjogZmFsc2UsICJleHBvcnRfa2V5IjogIm1vY2tfZGF0YV90YWJs
ZS9tb2NrX2NvbF8yIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6
ICIiLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJuYW1lIjogIm1vY2tfY29s
XzIiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNlLCAidmFsdWVzIjog
W10sICJvcmRlciI6IDF9LCAibW9ja19jb2xfMSI6IHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9p
ZCI6IDEwMDEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAibW9jayBjb2wgMSIsICJi
bGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwg
ImlkIjogMjMzLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImI1M2FhNmE1LTA0N2YtNDY3
Yi1hNWY3LWMyMjYxYmNkOTEyMSIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4
dCIsICJ0b29sdGlwIjogIiIsICJ3aWR0aCI6IDMzMCwgImludGVybmFsIjogZmFsc2UsICJyaWNo
X3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAidGFncyI6IFtdLCAiYWxsb3dfZGVmYXVs
dF92YWx1ZSI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJtb2NrX2RhdGFfdGFibGUvbW9ja19jb2xf
MSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgImRlZmF1
bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAibmFtZSI6ICJtb2NrX2NvbF8xIiwgImRlcHJl
Y2F0ZWQiOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgInZhbHVlcyI6IFtdLCAib3JkZXIi
OiAwfX0sICJwYXJlbnRfdHlwZXMiOiBbImluY2lkZW50Il0sICJ0eXBlX2lkIjogOCwgImV4cG9y
dF9rZXkiOiAibW9ja19kYXRhX3RhYmxlIiwgImZvcl9jdXN0b21fZmllbGRzIjogZmFsc2UsICJh
Y3Rpb25zIjogW10sICJpZCI6IG51bGwsICJmb3JfYWN0aW9ucyI6IGZhbHNlLCAic2NyaXB0cyI6
IFtdLCAidHlwZV9uYW1lIjogIm1vY2tfZGF0YV90YWJsZSIsICJmb3Jfbm90aWZpY2F0aW9ucyI6
IGZhbHNlfV0sICJ0aW1lZnJhbWVzIjogbnVsbCwgIndvcmtzcGFjZXMiOiBbXSwgInRhZ3MiOiBb
XSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAicGhhc2VzIjogW10sICJub3RpZmljYXRpb25zIjog
bnVsbCwgInJlZ3VsYXRvcnMiOiBudWxsLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJjcmVhdGVfZGF0
ZSI6IDE1NTgwODM4MDk1MzgsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2Vz
IChpbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRl
cm5hbCkiLCAiaWQiOiAwLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5h
bCkiLCAidXBkYXRlX2RhdGUiOiAxNTU4MDgzODA5NTM4LCAidXVpZCI6ICJiZmVlYzJkNC0zNzcw
LTExZTgtYWQzOS00YTAwMDQwNDRhYTAiLCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFs
c2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjogZmFsc2V9XSwgInNjcmlwdHMiOiBbXSwg
InNlcnZlcl92ZXJzaW9uIjogeyJtYWpvciI6IDMzLCAidmVyc2lvbiI6ICIzMy4wLjAiLCAiYnVp
bGRfbnVtYmVyIjogMCwgIm1pbm9yIjogMH0sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFt7InBy
b2dyYW1tYXRpY19uYW1lIjogImZuX21vY2tfaW50ZWdyYXRpb24iLCAidGFncyI6IFtdLCAiZXhw
b3J0X2tleSI6ICJmbl9tb2NrX2ludGVncmF0aW9uIiwgInV1aWQiOiAiODE5NTExNzYtYjdjMy00
ZWNkLWJlYWItOGU0ZmUyMzE0MjZiIiwgImV4cGVjdF9hY2siOiB0cnVlLCAiZGVzdGluYXRpb25f
dHlwZSI6IDAsICJ1c2VycyI6IFtdLCAiYXBpX2tleXMiOiBbXSwgIm5hbWUiOiAiZm5fbW9ja19p
bnRlZ3JhdGlvbiJ9XSwgImluY2lkZW50X2FydGlmYWN0X3R5cGVzIjogW10sICJyb2xlcyI6IFtd
LCAiZmllbGRzIjogW3sib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3BlcmF0aW9u
X3Blcm1zIjoge30sICJ0ZXh0IjogIm1vY2tfZmllbGRfb25lIiwgImJsYW5rX29wdGlvbiI6IGZh
bHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAyMzAsICJyZWFk
X29ubHkiOiBmYWxzZSwgInV1aWQiOiAiMTY5MzM4NTItNzEyYy00ZTNiLWI3ODUtN2M5OTUxNGU5
NTUxIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAi
bW9ja190b29sdGlwIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRl
bXBsYXRlcyI6IFtdLCAidGFncyI6IFtdLCAiYWxsb3dfZGVmYXVsdF92YWx1ZSI6IGZhbHNlLCAi
ZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL21vY2tfZmllbGRfb25lIiwgImhpZGVfbm90aWZpY2F0
aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICJtb2NrX3BsYWNlaG9sZGVyIiwgIm5hbWUiOiAi
bW9ja19maWVsZF9vbmUiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiY2FsY3VsYXRlZCI6IGZhbHNl
LCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgInZhbHVlcyI6IFtdLCAiZGVmYXVsdF9jaG9zZW5fYnlf
c2VydmVyIjogZmFsc2V9LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9wZXJh
dGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJtb2NrX2ZpZWxkX3R3byIsICJibGFua19vcHRpb24i
OiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMjMxLCAi
cmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImExNjEyMjNjLTljNjAtNDgwZi04YjIwLTU5NzQz
ZTY0ZGQ1MyIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgInRvb2x0
aXAiOiAibW9ja190b29sdGlwX3R3byIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0Ijog
ZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgInRhZ3MiOiBbXSwgImFsbG93X2RlZmF1bHRfdmFsdWUi
OiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9tb2NrX2ZpZWxkX3R3byIsICJoaWRl
X25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAibW9ja19wbGFjZWhvbGRlcl90
d28iLCAibmFtZSI6ICJtb2NrX2ZpZWxkX3R3byIsICJkZXByZWNhdGVkIjogZmFsc2UsICJjYWxj
dWxhdGVkIjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAidmFsdWVzIjogW10sICJkZWZh
dWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZX0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9p
ZCI6IDYsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiTW9jayBBY3Rpdml0eSBGaWVs
ZCBPbmUiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiAicHJvcGVydGllcyIsICJj
aGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMjMyLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjog
ImYxZDU0MTlmLWRiODktNGI2Yy1hN2YwLTEwNTU5YTZlMzZjMiIsICJjaG9zZW4iOiBmYWxzZSwg
ImlucHV0X3R5cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAi
cmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgInRhZ3MiOiBbXSwgImFsbG93X2Rl
ZmF1bHRfdmFsdWUiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiYWN0aW9uaW52b2NhdGlvbi9tb2Nr
X2FjdGl2aXR5X2ZpZWxkX29uZSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vo
b2xkZXIiOiAibW9jayBwbGFjZWhvbGRlciIsICJuYW1lIjogIm1vY2tfYWN0aXZpdHlfZmllbGRf
b25lIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImNhbGN1bGF0ZWQiOiBmYWxzZSwgInZhbHVlcyI6
IFtdLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2V9LCB7Im9wZXJhdGlvbnMiOiBb
XSwgInR5cGVfaWQiOiAwLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIm1vY2sgaW5j
aWRlbnQgZmllbGQiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiAicHJvcGVydGll
cyIsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMjM1LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1
dWlkIjogImY5MGI4Mzg2LTJmZDUtNDg2Ny05ZmM4LWY1MjM4ZWYzYjZmYyIsICJjaG9zZW4iOiBm
YWxzZSwgImlucHV0X3R5cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZh
bHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgInRhZ3MiOiBbXSwgImFs
bG93X2RlZmF1bHRfdmFsdWUiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiaW5jaWRlbnQvbW9ja19p
bmNpZGVudF9maWVsZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIi
OiAiIiwgIm5hbWUiOiAibW9ja19pbmNpZGVudF9maWVsZCIsICJkZXByZWNhdGVkIjogZmFsc2Us
ICJjYWxjdWxhdGVkIjogZmFsc2UsICJ2YWx1ZXMiOiBbXSwgImRlZmF1bHRfY2hvc2VuX2J5X3Nl
cnZlciI6IGZhbHNlfV0sICJvdmVycmlkZXMiOiBbXSwgImV4cG9ydF9kYXRlIjogMTU1ODA4Mzgw
ODAyOX0=
"""
)
| 5,347,706 |
def element_norm_spatial_exoao(processes,
comp_sol,
test_time,
test_var_list,
exact_solution,
subel_ints = 1,
zfill=None,
exact_time=None,
block_ids=[]):
"""
This is element_norm_spatial but input solution types are limited. An
exodus.ExodusFile object is expected for the computed solution, and an
analytic solution object is expected for the exact solution.
if exact_time is not given, the exact_solution is evaluated at test_time
"""
# Accept an exodus object as the computed solution.
if not isinstance(comp_sol, exodus.ExodusFile):
# Unrecognized type
print "Computed solution is not a recognized type."
print "It should be either an exodus.ExodusFile object."
sys.exit(1)
# Get the (1-based) index of the time for the computed solution
comp_t_idx1 = find_time_index(comp_sol, test_time)
# The (0-based) index of the variable in the computed solution
comp_var_idx0 = comp_sol.findVar(exodus.EX_ELEM_BLOCK,
test_var_list[0])
# Add error checking for test_var_list?
# If no list of block ids is given, generate a list including all blocks
if block_ids == []:
for block_idx0 in range(comp_sol.getNumber(exodus.EX_ELEM_BLOCK)):
block_ids.append(comp_sol.getId(exodus.EX_ELEM_BLOCK, block_idx0) )
# Accept a solution object as the exact solution
if hasattr(exact_solution, test_var_list[1]):
exact_sol = exact_solution
# If not overridden by exact_time argument, ensure the
# analytic solution time matches the simulation data time
if exact_time == None:
exact_time = comp_sol.getTimes()[comp_t_idx1 - 1]
# Refer directly to the attribute (method) we want
func_direct = getattr(exact_sol, test_var_list[1])
# Get nodal coords here rather than over and over for each element block
# for subel_ints == 1 restructure after computing center coordinates,
# which happens in the block loop
current_coordinates = get_current_coordinates(comp_sol, comp_t_idx1)
if subel_ints > 1:
restructured_coords = restructure_coordinates(current_coordinates)
else:
# Unrecognized type
print "Exact solution is not a recognized type."
print "It should be an analytic solution object."
sys.exit(1)
# Initialize
varET = WeightedErrorTally()
######## The work proper ########
for block_id in block_ids:
element_volumes = get_element_volumes(comp_sol,
block_id,
comp_t_idx1)
comp_var = comp_sol.readVar(comp_t_idx1,
exodus.EX_ELEM_BLOCK,
block_id,
comp_var_idx0)
exact_var = array.array('d')
# exact solution will be calculated from a function
if subel_ints == 1:
# Evaluate the exact solution at the center of the element
ctr_coords = comp_sol.computeCenters(exodus.EX_ELEM_BLOCK,
block_id,
current_coordinates)
# Have to add the fill here because computeCenters knows
# the true number of dimensions
if comp_sol.getDimension()==2 and not zfill==None:
x2_fill = array.array(comp_sol.storageType())
for i in range(len(ctr_coords[0])):
x2_fill.append(zfill)
ctr_coords.append(x2_fill)
r_coords = restructure_coordinates(ctr_coords)
len_r_coords = len(r_coords)
if processes <= 2:
# No point in parallelizing for 2 processes, since only 1 child process would be created.
exact_var = map_func(func_direct, 0, len_r_coords, r_coords, exact_time)
else:
child_processes = processes - 1
exact_var = [None for i in range(len_r_coords)]
pipes = [(None, None) for i in range(child_processes)]
process_list = [None for i in range(child_processes)]
for process_number in range(child_processes):
idx_start = (process_number * len_r_coords) / child_processes
idx_end = ((process_number+1) * len_r_coords) / child_processes
pipes[process_number] = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=map_func_parallel, args=(pipes[process_number][1], func_direct, idx_start, idx_end, r_coords, exact_time,))
process_list[process_number] = p
p.start()
for process_number in range(child_processes):
p = process_list[process_number]
idx_start = (process_number * len_r_coords) / child_processes
idx_end = ((process_number+1) * len_r_coords) / child_processes
conn_obj = pipes[process_number][0]
exact_var_local = conn_obj.recv()
for idx in range(idx_start, idx_end):
exact_var[idx] = exact_var_local[idx - idx_start]
conn_obj.close()
p.join()
else:
avg_evar_on_block(processes,
comp_sol,
block_id,
comp_t_idx1,
restructured_coords,
func_direct,
subel_ints,
zfill,
evar_array = exact_var)
varET.w_accumulate(exact_var, comp_var, element_volumes)
return varET
| 5,347,707 |
def generate_close_coordinates(
draw: st.DrawFn, prev_coord: Coordinates[str, np.float64]
) -> Coordinates[str, np.float64]:
"""Create coordinates using Hypothesis."""
diff = [
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
]
coord = vectorize(prev_coord) + diff
formatted: Iterator[np.float64] = (np.float64(i) for i in coord)
return dict(zip(SIXAXES, formatted))
| 5,347,708 |
def redistribute_vertices(
geom: Union[LineString, MultiLineString],
distance: float
) -> Union[LineString, MultiLineString]:
"""Redistribute the vertices of input line strings
Parameters
----------
geom : LineString or MultiLineString
Input line strings whose vertices is to be redistributed.
distance : float
The distance to be used for redistribution.
Returns
-------
LineString or MultiLineString
The resulting line strings with redistributed vertices.
Raises
------
ValueError
If input geometry is not LineString or MultiLineString.
"""
if geom.geom_type == 'LineString': # pylint: disable=R1705
num_vert = int(round(geom.length / distance))
if num_vert == 0:
num_vert = 1
return LineString(
[geom.interpolate(float(n) / num_vert, normalized=True)
for n in range(num_vert + 1)])
elif geom.geom_type == 'MultiLineString':
parts = [redistribute_vertices(part, distance)
for part in geom]
return type(geom)([p for p in parts if not p.is_empty])
raise ValueError(f'unhandled geometry {geom.geom_type}')
| 5,347,709 |
def test_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.Poisson())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values >= predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values >= predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values >= predictions['1% Prediction Interval'].values))
| 5,347,710 |
def test_sanity(ec2_resource, args):
"""Test if env vars are set, key exists, and can access ec2"""
if args.profile is None:
for var in mand_vars:
if os.environ.get(var) is None:
                print(var + ' must be set as an environment variable. \nExiting.')
exit(1)
if not os.path.exists(args.key_path):
print('Unable to see your key: {}, exiting now :-('.format(args.key_path))
exit(1)
try:
ec2_resource.instances.all().__iter__().__next__()
except botocore.exceptions.ClientError as expn:
print(expn)
print(perm_error)
exit(1)
| 5,347,711 |
def get_boolean_value(value):
"""Get the boolean value of the ParameterValue."""
if value.type == ParameterType.PARAMETER_BOOL:
return value.bool_value
else:
raise ValueError('Expected boolean value.')
| 5,347,712 |
def test_update_3():
"""
does not update if input is incorrect
"""
table_name = "table_2"
table_name_2 = "table_1"
result_1 = DB.update(
table_name=table_name,
fields={
"field_1": "2",
"field_12": "New Field"
},
where={
"field_8": "1"
}
)
result_2 = DB.update(
table_name=table_name_2,
fields={
"field_1": "2",
"field_12": "New Field"
},
where={
"field_8": "1"
}
)
result_3 = DB.update(
table_name=table_name_2,
fields={
"field_1": "2",
"field_12": "New Field"
},
where={
"field_1": "1"
}
)
result_4 = DB.update(
table_name=table_name_2,
fields={
"field_1": 2,
"field_12": "New Field"
},
where={
"field_1": 1
}
)
assert(
result_2 is None and
result_3 is None and
result_4 is None and
result_1[0]["field_12"] == "New Field" and
result_1[0]["field_1"] == "2" and
DB._db[table_name][0]["field_12"] == "New Field" and
DB._db[table_name][0]["field_1"] == "2" and
DB._db[table_name_2][0]["field_1"] == 1
)
| 5,347,713 |
def eval_bayesian_optimization(net: torch.nn.Module, input_picture: DATA,\
label_picture: DATA, ) -> float:
""" Compute classification accuracy on provided dataset to find the optimzed hyperparamter
settings.
Args:
net: trained neural network
Input: The image
Label: Th label to the respective image
Returns:
float: classification accuracy """
# Define the data
x_valid = input_picture
y_valid = label_picture
# Pre-locating memory
correct = 0
# Get the number of samples and batches before testing the network
num_samples = x_valid.shape[0]
num_batches = int(np.ceil(num_samples / float(BATCH_SIZE)))
net.eval()
with torch.no_grad():
for i in range(num_batches):
idx = range(i*BATCH_SIZE, np.minimum((i+1) * BATCH_SIZE, num_samples))
x_batch_val = get_variable(Variable(torch.from_numpy(x_valid[idx])))
y_batch_val = get_variable(Variable(torch.from_numpy(y_valid[idx]).long()))
output, _ = net(x_batch_val)
_, predicted = torch.max(output.data, 1)
correct += (predicted == y_batch_val).float().mean()
# Calculating the accuracy
return float(correct/num_batches)
| 5,347,714 |
def parse(url):
"""
URL-parsing function that checks that
- port is an integer 0-65535
- host is a valid IDNA-encoded hostname with no null-bytes
- path is valid ASCII
Args:
A URL (as bytes or as unicode)
Returns:
A (scheme, host, port, path) tuple
Raises:
ValueError, if the URL is not properly formatted.
"""
parsed = urllib.parse.urlparse(url)
if not parsed.hostname:
raise ValueError("No hostname given")
if isinstance(url, bytes):
host = parsed.hostname
# this should not raise a ValueError,
# but we try to be very forgiving here and accept just everything.
# decode_parse_result(parsed, "ascii")
else:
host = parsed.hostname.encode("idna")
parsed = encode_parse_result(parsed, "ascii")
port = parsed.port
if not port:
port = 443 if parsed.scheme == b"https" else 80
full_path = urllib.parse.urlunparse(
(b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
)
if not full_path.startswith(b"/"):
full_path = b"/" + full_path
if not check.is_valid_host(host):
raise ValueError("Invalid Host")
if not check.is_valid_port(port):
raise ValueError("Invalid Port")
return parsed.scheme, host, port, full_path
| 5,347,715 |
def show_label_images(input_yaml, wait_ms=10):
"""
Shows and draws pictures with labeled traffic lights.
Can save pictures.
:param input_yaml: Path to yaml file
:param wait_ms: wait time in milliseconds before OpenCV shows next image
"""
label_list = {'off':0, 'green':1, 'yellow':2, 'red':3}
# load the model
tlc = TLClassifierCNN()
model_dir = 'model'
tlc.load_model(model_dir)
# Shows and draws pictures with labeled traffic lights
images = get_all_labels(input_yaml)
for i, image_dict in enumerate(images):
image = cv2.imread(image_dict['path'])
if image is None:
raise IOError('Could not open image path', image_dict['path'])
break
for box in image_dict['boxes']:
xmin = ir(box['x_min'])
ymin = ir(box['y_min'])
xmax = ir(box['x_max'])
ymax = ir(box['y_max'])
if xmax-xmin<=0 or ymax-ymin<=0:
continue
label = box['label']
label = label.lower()
roi = image[ymin:(ymax+1), xmin:(xmax+1)]
resized_roi = cv2.resize(roi, (32,32), interpolation=cv2.INTER_LINEAR)
prd_labels, prd_probabilities = tlc.predict(np.array([resized_roi]), batch_size=1)
prd_prob = prd_probabilities[0][label_list[prd_labels[0]]] * 100
if label == prd_labels[0]:
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0))
label_str = '%s(%.2f)' % (prd_labels[0], prd_prob)
image = cv2.putText(image, label_str, (xmin, ymax+20), 0, 0.4, (0,255,0), 1, cv2.LINE_AA) # text green
else:
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 0, 255)) # color red
label_str = '%s: %s(%.2f)' % (label, prd_labels[0], prd_prob)
image = cv2.putText(image, label_str, (xmin, ymax+20), 0, 0.4, (0,0,255), 1, cv2.LINE_AA)
cv2.imshow('labeled_image', image)
#cv2.waitKey(10)
if cv2.waitKey(wait_ms) == 27:
cv2.destroyAllWindows()
break
| 5,347,716 |
def is_amicable(num: int) -> bool:
""" Returns whether the number is part of an amicable number pair """
friend = sum(divisors(num)) - num
# Only those in pairs are amicable numbers. If the sum is the number itself, it's a perfect number
return friend != num and sum(divisors(friend)) - friend == num
| 5,347,717 |
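A quick usage sketch for is_amicable above. The divisors helper is not shown in the snippet, so a naive stand-in is assumed here; it must return all divisors of n including n itself, because the function subtracts num from the sum to get the aliquot sum.
def divisors(n: int):
    # naive stand-in for the helper used by is_amicable: every divisor of n, including n itself
    return [d for d in range(1, n + 1) if n % d == 0]
print(is_amicable(220))  # True  -- 220 and 284 form the classic amicable pair
print(is_amicable(284))  # True
print(is_amicable(6))    # False -- 6 is perfect: its aliquot sum equals itself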
def apply_connector_types(inferred: TypeInferenceDict):
""" Applies the inferred connector types on the SDFG. """
for (node, conn, is_in), dtype in inferred.items():
if dtype.type is None:
continue
if is_in:
node.in_connectors[conn] = dtype
else:
node.out_connectors[conn] = dtype
| 5,347,718 |
def threaded_encode_job(job):
"""
Given a job, run it through its encoding workflow in a non-blocking manner.
"""
# Update the timestamp for when the node last did something so it
# won't terminate itself.
NodeStateManager.i_did_something()
job.nommer.onomnom()
| 5,347,719 |
def no_data_info():
"""Returns information about not having enough information yet to display"""
return html.Div(children=[dcc.Markdown('''
# Please wait a little bit...
The MongoDB database was probably just initialized and is currently empty. You will need to wait a bit (~30 min) for it to populate with initial data before using the application.
''', className='eleven columns', style={'paddingLeft': '5%'})], className="row")
| 5,347,720 |
def delta_t(soil_type):
""" Displacement at Tu
"""
delta_ts = {
"dense sand": 0.003,
"loose sand": 0.005,
"stiff clay": 0.008,
"soft clay": 0.01,
}
    if soil_type not in delta_ts:
        raise ValueError("Unknown soil type.")
    return delta_ts[soil_type]
| 5,347,721 |
def extractPlate(imgOriginal, listOfMatchingChars, PlateWidthPaddingFactor, PlateHeightPaddingFactor):
""" Extract license-plate in the provided image, based on given contours group that corresponds for matching characters """
# Sort characters from left to right based on x position:
listOfMatchingChars.sort(key=lambda matchingChar_: matchingChar_.intCenterX)
    # Calculate the plate centroid (average of leftmost and rightmost characters):
fltPlateCenterX = (listOfMatchingChars[0].intCenterX + listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterX) / 2.0
fltPlateCenterY = (listOfMatchingChars[0].intCenterY + listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY) / 2.0
ptPlateCenter = fltPlateCenterX, fltPlateCenterY
# Calculate plate width (rightmost - leftmost characters):
intPlateWidth = int(PlateWidthPaddingFactor * (listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectX +
listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectWidth -
listOfMatchingChars[0].intBoundingRectX))
# Calculate plate height (average over all characters):
intTotalOfCharHeights = 0
for matchingChar in listOfMatchingChars:
intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight
fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)
intPlateHeight = int(fltAverageCharHeight * PlateHeightPaddingFactor)
# Calculate correction angle of plate region (simple geometry calculation):
fltOpposite = listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY - listOfMatchingChars[0].intCenterY
fltHypotenuse = (listOfMatchingChars[0] - listOfMatchingChars[len(listOfMatchingChars) - 1])
fltCorrectionAngleInRad = asin(fltOpposite / fltHypotenuse)
fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / pi)
# Rotate the entire image (affine warp), for compensating the angle of the plate region:
rotationMatrix = getRotationMatrix2D(tuple(ptPlateCenter), fltCorrectionAngleInDeg, 1.0)
height, width, _ = imgOriginal.shape
imgRotated = warpAffine(imgOriginal, rotationMatrix, (width, height))
# Crop the plate from the image:
imgCropped = getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight), tuple(ptPlateCenter))
    # Create and return possiblePlate object, which packs most of the above information:
possiblePlate = PossiblePlate()
possiblePlate.rrLocationOfPlateInScene = (tuple(ptPlateCenter), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg)
possiblePlate.imgPlate = imgCropped
return possiblePlate
| 5,347,722 |
def create_stratified_name(stem, stratification_name, stratum_name):
"""
generate a standardised stratified compartment name
:param stem: str
the previous stem to the compartment or parameter name that needs to be extended
:param stratification_name: str
the "stratification" or rationale for implementing the current stratification process
:param stratum_name: str
name of the stratum within the stratification
:return: str
the composite name with the standardised stratification name added on to the old stem
"""
return stem + create_stratum_name(stratification_name, stratum_name)
| 5,347,723 |
def eda_stream_test(freq_hz=4):
"""
Capture EDA data for 30s, check the data for frequency(default 4Hz) mismatch
:param freq_hz: Frequency in which the Sampling should happen(HZ)
:return:
"""
capture_time = 30
common.dcb_cfg('d', 'eda')
common.quick_start_eda(freq_hz)
time.sleep(capture_time)
common.watch_shell.quick_stop('eda', 'eda')
common.dcb_cfg('d', 'eda')
f_path = common.rename_stream_file(common.eda_stream_file_name, '_eda_stream{}hz_test.csv'.format(freq_hz))
err_status, err_str, results_dict = qa_utils.check_stream_data(f_path, 'eda', 1, freq_hz)
common.test_logger.info('EDA {}Hz Stream Test Results: {}'.format(freq_hz, results_dict))
if err_status:
common.test_logger.error('*** EDA {}Hz Stream Test - FAIL ***'.format(freq_hz))
raise ConditionCheckFailure("\n\n" + '{}'.format(err_str))
| 5,347,724 |
def bind_args_kwargs(sig: inspect.Signature, *args: typing.Any, **kwargs: typing.Any) -> typing.List[BoundParameter]:
"""Bind *args and **kwargs to signature and get Bound Parameters.
:param sig: source signature
:type sig: inspect.Signature
:param args: not keyworded arguments
:type args: typing.Any
:param kwargs: keyworded arguments
:type kwargs: typing.Any
:return: Iterator for bound parameters with all information about it
:rtype: typing.List[BoundParameter]
.. versionadded:: 3.3.0
.. versionchanged:: 5.3.1 return list
"""
result: typing.List[BoundParameter] = []
bound: typing.MutableMapping[str, inspect.Parameter] = sig.bind(*args, **kwargs).arguments
for param in sig.parameters.values():
result.append(BoundParameter(parameter=param, value=bound.get(param.name, param.default)))
return result
| 5,347,725 |
def user(user_type):
"""
:return: instance of a User
"""
return user_type()
| 5,347,726 |
def true_or_false(item):
"""This function is used to assist in getting appropriate
values set with the PythonOption directive
"""
try:
item = item.lower()
    except AttributeError:
        # non-string values (ints, booleans, None) have no .lower()
        pass
    if item in ['yes', 'true', '1', 1, True]:
        return True
    elif item in ['no', 'false', '0', 0, None, False]:
        return False
    else:
        raise ValueError('Unrecognized truth value: %r' % (item,))
| 5,347,727 |
def validate_task(task, variables, config=None):
""" Validate that a simulation can be executed with OpenCOR
Args:
task (:obj:`Task`): request simulation task
variables (:obj:`list` of :obj:`Variable`): variables that should be recorded
config (:obj:`Config`, optional): BioSimulators common configuration
Returns:
:obj:`tuple:`:
* :obj:`Task`: possibly alternate task that OpenCOR should execute
* :obj:`lxml.etree._ElementTree`: element tree for model
* :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it
"""
config = config or get_config()
model = task.model
sim = task.simulation
if config.VALIDATE_SEDML:
raise_errors_warnings(validation.validate_task(task),
error_summary='Task `{}` is invalid.'.format(task.id))
raise_errors_warnings(validation.validate_model_language(model.language, ModelLanguage.CellML),
error_summary='Language for model `{}` is not supported.'.format(model.id))
raise_errors_warnings(validation.validate_model_change_types(model.changes, (ModelAttributeChange,)),
error_summary='Changes for model `{}` are not supported.'.format(model.id))
raise_errors_warnings(*validation.validate_model_changes(model),
error_summary='Changes for model `{}` are invalid.'.format(model.id))
raise_errors_warnings(validation.validate_simulation_type(sim, (UniformTimeCourseSimulation, )),
error_summary='{} `{}` is not supported.'.format(sim.__class__.__name__, sim.id))
raise_errors_warnings(*validation.validate_simulation(sim),
error_summary='Simulation `{}` is invalid.'.format(sim.id))
raise_errors_warnings(*validation.validate_data_generator_variables(variables),
error_summary='Data generator variables for task `{}` are invalid.'.format(task.id))
# read model; TODO: support imports
model_etree = lxml.etree.parse(model.source)
# validate variables
opencor_variable_names = validate_variable_xpaths(variables, model_etree)
# validate simulation
opencor_simulation = validate_simulation(task.simulation)
# check that OpenCOR can execute the request algorithm (or a similar one)
opencor_algorithm = get_opencor_algorithm(task.simulation.algorithm, config=config)
# create new task to manage configuration for OpenCOR
opencor_task = copy.deepcopy(task)
opencor_task.simulation = opencor_simulation
opencor_task.simulation.algorithm = opencor_algorithm
return opencor_task, model_etree, opencor_variable_names
| 5,347,728 |
def time_ms():
"""currently pypy only has Python 3.5.3, so we are missing Python 3.7's time.time_ns() with better precision
see https://www.python.org/dev/peps/pep-0564/
    the function here is a convenience; you should use `time.time_ns() // 1_000_000` if using >= Python 3.7
"""
return int(time.time() * 1e3)
| 5,347,729 |
def _fetch_git_repo(uri, version, dst_dir):
"""
Clone the git repo at ``uri`` into ``dst_dir``, checking out commit ``version`` (or defaulting
to the head commit of the repository's master branch if version is unspecified).
Assumes authentication parameters are specified by the environment, e.g. by a Git credential
helper.
"""
# We defer importing git until the last moment, because the import requires that the git
# executable is availble on the PATH, so we only want to fail if we actually need it.
import git
repo = git.Repo.init(dst_dir)
origin = repo.create_remote("origin", uri)
origin.fetch(depth=GIT_FETCH_DEPTH)
if version is not None:
try:
repo.git.checkout(version)
except git.exc.GitCommandError as e:
raise ExecutionException(
"Unable to checkout version '%s' of git repo %s"
"- please ensure that the version exists in the repo. "
"Error: %s" % (version, uri, e)
)
else:
repo.create_head("master", origin.refs.master)
repo.heads.master.checkout()
repo.submodule_update(init=True, recursive=True)
| 5,347,730 |
def _calculate_mk(tp, fp, tn, fn):
"""Calculate mk."""
ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float("nan")))
    npv = np.where((tn + fn) > 0, tn / (tn + fn), np.array(float("nan")))
numerator = ppv + npv - 1.0
denominator = 1.0
return numerator, denominator
| 5,347,731 |
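A small worked check of the markedness helper above (assuming _calculate_mk and numpy are in scope): MK = PPV + NPV - 1, so with PPV = 6/8 = 0.75 and NPV = 10/12 ≈ 0.833 the ratio comes out to about 0.583.
import numpy as np
tp, fp = np.array([6.0]), np.array([2.0])
tn, fn = np.array([10.0]), np.array([2.0])
numerator, denominator = _calculate_mk(tp, fp, tn, fn)
print(numerator / denominator)  # [0.58333333]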
def geometric_progression_for_stepsize(
x, update, dist, decision_function, current_iteration
):
"""Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary.
"""
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = x + epsilon * update
success = decision_function(updated)[0]
if success:
break
else:
epsilon = epsilon / 2.0
return epsilon
| 5,347,732 |
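A toy illustration of the stepsize search above, not taken from the original attack code: the decision function here just reports whether a candidate point is still inside the unit ball, standing in for the real boundary check.
import numpy as np
def toy_decision_function(point):
    # "success" means the candidate point lies inside the unit ball
    return [np.linalg.norm(point) <= 1.0]
x = np.array([0.85, 0.0])
update = np.array([1.0, 0.0])
eps = geometric_progression_for_stepsize(x, update, dist=0.8,
                                         decision_function=toy_decision_function,
                                         current_iteration=4)
print(eps)  # 0.1 -- halved from 0.4 to 0.2 to 0.1 until x + eps*update lands back inside the ball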
def absorption_two_linear_known(freq_list, interaction_strength, decay_rate):
""" The absorption is half the imaginary part of the susecptibility. """
return susceptibility_two_linear_known(freq_list, interaction_strength,
decay_rate).imag/2.0
| 5,347,733 |
def mad(x, mask, base_size=(11, 3), mad_size=(21, 21), debug=False, sigma=True):
"""Calculate the MAD of freq-time data.
Parameters
----------
x : np.ndarray
Data to filter.
mask : np.ndarray
Initial mask.
base_size : tuple
Size of the window to use in (freq, time) when
estimating the baseline.
mad_size : tuple
Size of the window to use in (freq, time) when
estimating the MAD.
sigma : bool, optional
Rescale the output into units of Gaussian sigmas.
Returns
-------
mad : np.ndarray
Size of deviation at each point in MAD units.
"""
xs = medfilt(x, mask, size=base_size)
dev = np.abs(x - xs)
mad = medfilt(dev, mask, size=mad_size)
if sigma:
mad *= 1.4826 # apply the conversion from MAD->sigma
if debug:
return dev / mad, dev, mad
return dev / mad
| 5,347,734 |
def post_to_conf(post_grid, cell_size):
"""
Converts a N-dimensional grid of posterior values into a grid of confidence levels. The posterior values do not need
to be normalised, i.e. their distribution need not integrate to 1. Works with likelihood values (not log-likelihood)
instead of posteriors, assuming a flat prior.
Args:
post_grid (ND numpy array): Grid of posterior values.
cell_size (float): The size of a grid cell, e.g. for 2 dimensions x and y this would be dx*dy.
Returns:
ND numpy array: Grid of confidence levels, where the value at each point is the minimum confidence region that \
includes that point. The least likely point would have a value of 1, indicating that it is \
only included in the 100% confidence region and excluded from anything smaller.
"""
# Create flattened list of posteriors and sort in descending order
posteriors = post_grid.flatten()
posteriors[::-1].sort()
# Dictionary to contain mapping between posterior and confidence level
confidence_level_unnormalised = {}
# Calculate the cumulative integral of posterior values
integral = 0
for posterior in posteriors:
integral += posterior * cell_size
confidence_level_unnormalised[posterior] = integral
# Map each posterior in the grid to its confidence value
confidence_grid_unnormalised = np.vectorize(confidence_level_unnormalised.get)(post_grid)
# Normalise the confidence values using the final (complete) integral
confidence_grid_normalised = np.divide(confidence_grid_unnormalised, integral)
return confidence_grid_normalised
| 5,347,735 |
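A tiny hand-made 1-D example for post_to_conf above (values are illustrative only): four grid cells of width 1.0 with unnormalised posterior 4, 3, 2, 1 give cumulative integrals 4, 7, 9, 10, so the normalised confidence levels are 0.4, 0.7, 0.9 and 1.0.
import numpy as np
post = np.array([4.0, 3.0, 2.0, 1.0])
conf = post_to_conf(post, cell_size=1.0)
print(conf)  # [0.4 0.7 0.9 1. ]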
def test_remaining_segments_with_new_segment_returns_change():
"""
Verifies that moving to a new segment decrements the number of remaining segments.
"""
grid = Grid(((3, -1, -1), (-1, 2, -1), (0, -1, -1)))
# Test first point on second segment
draw_path(grid, ((0, 0), (0, 1), (0, 2), (1, 2)))
assert grid[1][2].remaining_segments == 2
# Test second point on second segment has no change
grid[1][2].child = grid[2][2]
assert grid[2][2].remaining_segments == 2
# Test first point on third segment
grid[2][2].child = grid[2][1]
assert grid[2][1].remaining_segments == 1
| 5,347,736 |
def get_fuzzer_display(testcase):
"""Return FuzzerDisplay tuple."""
if (testcase.overridden_fuzzer_name == testcase.fuzzer_name or
not testcase.overridden_fuzzer_name):
return FuzzerDisplay(
engine=None,
target=None,
name=testcase.fuzzer_name,
fully_qualified_name=testcase.fuzzer_name)
fuzz_target = get_fuzz_target(testcase.overridden_fuzzer_name)
if not fuzz_target:
# Legacy testcases.
return FuzzerDisplay(
engine=testcase.fuzzer_name,
target=testcase.get_metadata('fuzzer_binary_name'),
name=testcase.fuzzer_name,
fully_qualified_name=testcase.overridden_fuzzer_name)
return FuzzerDisplay(
engine=fuzz_target.engine,
target=fuzz_target.binary,
name=fuzz_target.engine,
fully_qualified_name=fuzz_target.fully_qualified_name())
| 5,347,737 |
def process_articles_results(articles_list):
"""
Function that processes the articles result and transform them to a list of Objects
Args:
        articles_list: A list of dictionaries that contain article details
    Returns:
        articles_results: A list of Articles objects
"""
articles_results = []
for article_item in articles_list:
id = article_item.get('id')
author = article_item.get('author')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
content = article_item.get('content')
if urlToImage:
article_object = Articles(id, author, title, description, url, urlToImage, publishedAt, content)
articles_results.append(article_object)
return articles_results
| 5,347,738 |
def list_to_bytes_list(strList):
"""
This function turns an array of strings into a pointer array
with pointers pointing to the encodings of those strings
Possibly contained bytes are kept as they are.
:param strList: List of strings that shall be converted
:type strList: List of strings
:returns: Pointer array with pointers pointing to bytes
:raises: TypeError if strList is not list, set or tuple
"""
pList = c_char_p * len(strList)
# if strList is already a pointerarray or None, there is nothing to do
if isinstance(strList, (pList, type(None))):
return strList
if not isinstance(strList, (list, set, tuple)):
raise TypeError("strList must be list, set or tuple, not " +
str(type(strList)))
pList = pList()
for i, elem in enumerate(strList):
pList[i] = str_to_bytes(elem)
return pList
| 5,347,739 |
def handle_td(element, box, _get_image_from_uri):
"""Handle the ``colspan``, ``rowspan`` attributes."""
if isinstance(box, boxes.TableCellBox):
# HTML 4.01 gives special meaning to colspan=0
# http://www.w3.org/TR/html401/struct/tables.html#adef-rowspan
# but HTML 5 removed it
# http://www.w3.org/TR/html5/tabular-data.html#attr-tdth-colspan
# rowspan=0 is still there though.
integer_attribute(element, box, 'colspan')
integer_attribute(element, box, 'rowspan', minimum=0)
return [box]
| 5,347,740 |
def login_flags(db, host, port, user, db_prefix=True):
"""
returns a list of connection argument strings each prefixed
with a space and quoted where necessary to later be combined
in a single shell string with `"".join(rv)`
db_prefix determines if "--dbname" is prefixed to the db argument,
since the argument was introduced in 9.3.
"""
flags = []
if db:
if db_prefix:
flags.append(' --dbname={0}'.format(pipes.quote(db)))
else:
flags.append(' {0}'.format(pipes.quote(db)))
if host:
flags.append(' --host={0}'.format(host))
if port:
flags.append(' --port={0}'.format(port))
if user:
flags.append(' --username={0}'.format(user))
return flags
| 5,347,741 |
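Example usage of login_flags above. The pipes module is what the snippet relies on for shell quoting; it was removed in Python 3.13, where shlex.quote is the replacement.
import pipes  # needed by login_flags for pipes.quote
flags = login_flags('my db', 'localhost', 5432, 'admin')
print(''.join(flags))
# prints (with a leading space):  --dbname='my db' --host=localhost --port=5432 --username=admin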
def glplot(ncfile, times, colora, label):
"""
add a plot of grounding line points to current axes.
makes use of the numpy.ma.MaskedArray when reading xGL,yGL
"""
try:
ncid = Dataset(ncfile, 'r')
except:
print("Failed to open file: {}. Skipping.".format(ncfile))
return 350.0, 500.0
time = ncid.variables["time"][:]
lxmax = 0.0
lxmin = 800.0
for i in range(0, len(times)):
seq = (time == times[i])
xGL = ncid.variables["xGL"][:, seq]*1e-3
lxmax = max(np.max(xGL), lxmax)
lxmin = min(np.min(xGL), lxmin)
yGL = ncid.variables["yGL"][:, seq]*1e-3
plt.plot(xGL, yGL, 's', ms=3, mfc=colora[i],
mec=colora[i], label=label + ', t = ' + format(times[i]))
return lxmin, lxmax
| 5,347,742 |
def roi_intersect(a, b):
"""
Compute intersection of two ROIs.
.. rubric:: Examples
.. code-block::
s_[1:30], s_[20:40] => s_[20:30]
s_[1:10], s_[20:40] => s_[10:10]
# works for N dimensions
s_[1:10, 11:21], s_[8:12, 10:30] => s_[8:10, 11:21]
"""
def slice_intersect(a, b):
if a.stop < b.start:
return slice(a.stop, a.stop)
if a.start > b.stop:
return slice(a.start, a.start)
_in = max(a.start, b.start)
_out = min(a.stop, b.stop)
return slice(_in, _out)
if isinstance(a, slice):
if not isinstance(b, slice):
b = b[0]
return slice_intersect(a, b)
b = (b,) if isinstance(b, slice) else b
return tuple(slice_intersect(sa, sb) for sa, sb in zip(a, b))
| 5,347,743 |
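The docstring examples above can be checked directly; np.s_ is just a convenient way to build the slice tuples.
import numpy as np
print(roi_intersect(np.s_[1:30], np.s_[20:40]))  # slice(20, 30, None)
print(roi_intersect(np.s_[1:10], np.s_[20:40]))  # slice(10, 10, None)
print(roi_intersect(np.s_[1:10, 11:21], np.s_[8:12, 10:30]))  # (slice(8, 10, None), slice(11, 21, None))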
def _create_file(file_name, size):
"""Create a file with the file size is size"""
file = open(file_name, "w")
file.seek(size)
file.write('\x00')
file.close()
| 5,347,744 |
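A short usage check for _create_file above; the path is just an illustrative temporary location.
import os
_create_file('/tmp/example.bin', 1024)
print(os.path.getsize('/tmp/example.bin'))  # 1024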
def _flows_finished(pgen_grammar, stack):
"""
if, while, for and try might not be finished, because another part might
still be parsed.
"""
for stack_node in stack:
if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'):
return False
return True
| 5,347,745 |
def PositionToPercentile(position, field_size):
"""Converts from position in the field to percentile.
position: int
field_size: int
"""
beat = field_size - position + 1
percentile = 100.0 * beat / field_size
return percentile
| 5,347,746 |
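Worked example for PositionToPercentile above: finishing 3rd in a field of 10 beats 10 - 3 + 1 = 8 entrants (the runner counts themselves), which maps to the 80th percentile.
print(PositionToPercentile(3, 10))   # 80.0
print(PositionToPercentile(1, 10))   # 100.0 -- the winner
print(PositionToPercentile(10, 10))  # 10.0  -- last place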
def grid(num, ndim, large=False):
"""Build a uniform grid with num points along each of ndim axes."""
if not large:
_check_not_too_large(np.power(num, ndim) * ndim)
x = np.linspace(0, 1, num, dtype='float64')
w = 1 / (num - 1)
points = np.stack(
np.meshgrid(*[x for _ in range(ndim)], indexing='ij'), axis=-1)
return points, w
| 5,347,747 |
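Example usage of grid above; numpy is assumed imported as np by the snippet, and large=True is passed only so the _check_not_too_large helper (not included in the snippet) is never called.
import numpy as np
points, w = grid(3, ndim=2, large=True)
print(points.shape)  # (3, 3, 2) -- a 3x3 lattice of 2-D points on [0, 1]^2
print(w)             # 0.5       -- spacing between neighbouring grid points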
def rolling_outlier_quantile(x, width, q, m):
"""Detect outliers by multiples of a quantile in a window.
Outliers are the array elements outside `m` times the `q`'th
quantile of deviations from the smoothed trend line, as calculated from
the trend line residuals. (For example, take the magnitude of the 95th
quantile times 5, and mark any elements greater than that value as
outliers.)
This is the smoothing method used in BIC-seq (doi:10.1073/pnas.1110574108)
with the parameters width=200, q=.95, m=5 for WGS.
Returns
-------
np.array
A boolean array of the same size as `x`, where outlier indices are True.
"""
if len(x) <= width:
return np.zeros(len(x), dtype=np.bool_)
dists = np.abs(x - savgol(x, width))
quants = rolling_quantile(dists, width, q)
outliers = (dists > quants * m)
return outliers
| 5,347,748 |
def append_gopath_to_env(envfile: str):
"""
Append the go path to the user's shell profile.
Args:
envfile (str): path to the env file, auto generated
"""
# open the current active shell source file and append the go path
print('Appending go path to $PATH')
with open(envfile, 'a') as f:
f.write('\n' + 'export PATH=$PATH:/usr/local/go/bin' + '\n')
    # source the updated envfile in a subshell (shell=True expects a single command string)
    subprocess.call('. ' + envfile, shell=True)
| 5,347,749 |
def set_parameter(root, name, value):
""" sets parameter to value """
for child in root.iter('Parameter'):
if child.attrib['name'] == name:
child.attrib['value'] = str(value)
| 5,347,750 |
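A minimal illustration of set_parameter above using a hand-made XML tree; the Parameter/name/value layout is just what the function expects, not any particular schema.
import xml.etree.ElementTree as ET
root = ET.fromstring(
    '<Model><Parameter name="alpha" value="1"/><Parameter name="beta" value="2"/></Model>'
)
set_parameter(root, 'alpha', 3.5)
print(ET.tostring(root, encoding='unicode'))
# <Model><Parameter name="alpha" value="3.5" /><Parameter name="beta" value="2" /></Model>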
def compute_region_classification_len(dataset_output,
dataset_type: str):
"""
Compute the number of points per class and return a dictionary (dataset_type specifies the keys) with the results
"""
stable_region_indices, marginal_stable_region_indices, marginal_region_indices, marginal_unstable_region_indices, unstable_region_indices = compute_regions_belongings(
value=dataset_output)
region_len_dict = {f"len_{dataset_type}_stable_region": sum(stable_region_indices),
f"len_{dataset_type}_marginal_stable_region": sum(marginal_stable_region_indices),
f"len_{dataset_type}_marginal_region": sum(marginal_region_indices),
f"len_{dataset_type}_marginal_unstable_region": sum(marginal_unstable_region_indices),
f"len_{dataset_type}_unstable_region": sum(unstable_region_indices),
}
return region_len_dict
| 5,347,751 |
def merge_rdn(merged_image_file, mask_file, sensors):
""" Merge radiance images.
Arguments:
merged_image_file: str
Merged radiance image filename.
mask_file: str
Background mask filename.
sensors: dict
Sensor dictionaries.
"""
    if os.path.exists(merged_image_file):
        logger.info('The merged radiance image already exists at %s.' %merged_image_file)
        return
from ENVI import empty_envi_header, read_envi_header, write_envi_header
# Read mask.
mask_header = read_envi_header(os.path.splitext(mask_file)[0]+'.hdr')
mask_image = np.memmap(mask_file,
mode='r',
dtype='bool',
shape=(mask_header['lines'],
mask_header['samples']))
# Get the map upper-left coordinates and pixel sizes of VNIR and SWIR images.
ulx, uly, pixel_size = float(mask_header['map info'][3]), float(mask_header['map info'][4]), float(mask_header['map info'][5])
# Determine regular map grids.
x, y = np.meshgrid(ulx+np.arange(mask_header['samples'])*pixel_size,
uly-np.arange(mask_header['lines'])*pixel_size)
del ulx, uly, pixel_size
# Read radiance header and image.
header_dict = dict()
image_file_dict = dict()
bands_waves_fwhms = []
for sensor_index, sensor_dict in sensors.items():
tmp_header = read_envi_header(os.path.splitext(sensor_dict['ortho_rdn_image_file'])[0]+'.hdr')
for band in range(tmp_header['bands']):
bands_waves_fwhms.append(['%s_%d' %(sensor_index,band), tmp_header['wavelength'][band], tmp_header['fwhm'][band]])
header_dict[sensor_index] = tmp_header
image_file_dict[sensor_index] = sensor_dict['ortho_rdn_image_file']
bands_waves_fwhms.sort(key = lambda x: x[1])
# Merge images.
wavelengths = []
fwhms = []
fid = open(merged_image_file, 'wb')
for v in bands_waves_fwhms:
# Determine which sensor, band to read.
sensor_index, band = v[0].split('_')
band = int(band)
wavelengths.append(v[1])
fwhms.append(v[2])
header = header_dict[sensor_index]
image_file = image_file_dict[sensor_index]
# Write image.
if ((v[1]>=1339.0)&(v[1]<=1438.0))|((v[1]>=1808.0)&(v[1]<=1978.0))|(v[1]>=2467.0):
resampled_image = np.zeros(x.shape)
else:
offset = header['header offset']+4*band*header['lines']*header['samples']# in bytes
rdn_image = np.memmap(image_file,
dtype='float32',
mode='r',
offset=offset,
shape=(header['lines'], header['samples']))
resampled_image = resample_ortho_rdn(np.copy(rdn_image),
float(header_dict[sensor_index]['map info'][3]),
float(header_dict[sensor_index]['map info'][4]),
float(header_dict[sensor_index]['map info'][5]),
x, y)
resampled_image[mask_image] = 0.0
rdn_image.flush()
del rdn_image
        fid.write(resampled_image.astype('float32').tobytes())
del resampled_image
fid.close()
del header_dict, image_file_dict, x, y
mask_image.flush()
del mask_image
# Write header.
header = empty_envi_header()
header['description'] = 'Merged radiance, in [mW/(cm2*um*sr)]'
header['file type'] = 'ENVI Standard'
header['samples'] = mask_header['samples']
header['lines'] = mask_header['lines']
header['bands'] = len(wavelengths)
header['byte order'] = 0
header['header offset'] = 0
header['interleave'] = 'bsq'
header['data type'] = 4
header['wavelength'] = wavelengths
header['fwhm'] = fwhms
header['wavelength units'] = 'nm'
header['acquisition time'] = tmp_header['acquisition time']
header['map info'] = mask_header['map info']
header['coordinate system string'] = mask_header['coordinate system string']
write_envi_header(os.path.splitext(merged_image_file)[0]+'.hdr', header)
del header, tmp_header
    logger.info('Write the merged radiance image to %s.' %merged_image_file)
| 5,347,752 |
def set_world_properties(world_uid, world_name=None, owner=None, config=None):
""" Set the properties of the given world """
return runtime.set_world_properties(world_uid, world_name, owner, config)
| 5,347,753 |
def sort(values, key=None):
"""Perform an in-place sort on 'values', which must be a mutable sequence."""
if len(values) <= 1:
return
if key is None:
_quicksortBetween(values, 0, len(values) - 1, 1, lambda x, y: x < y)
else:
_quicksortBetween(values, 0, len(values) - 1, 1, lambda x, y: key(x) < key(y))
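# Example usage (illustrative; relies on the _quicksortBetween helper that this
# module defines elsewhere, so it is shown as a sketch only):
#   values = [3, 1, 2]
#   sort(values)              # values -> [1, 2, 3]
#   words = ["bb", "a", "ccc"]
#   sort(words, key=len)      # words -> ["a", "bb", "ccc"]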
| 5,347,754 |
def _resize_and_pad(img, desired_size):
"""
Resize an image to the desired width and height
:param img:
:param desired_size:
:return:
"""
old_size = img.shape[:2] # old_size is in (height, width) format
ratio = float(desired_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
if new_size[0] == 0:
new_size = (new_size[0] + 1, new_size[1])
if new_size[1] == 0:
new_size = (new_size[0], new_size[1] + 1)
    # cv2.resize expects the target size in (width, height) order, while
    # new_size is (height, width), hence the reversed indices
    im = cv2.resize(img, (new_size[1], new_size[0]))
delta_w = desired_size - new_size[1]
delta_h = desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
img = cv2.copyMakeBorder(im, top, bottom, left, right,
cv2.BORDER_CONSTANT,
value=color)
return img
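# Example usage (illustrative sketch; assumes cv2 and numpy as np are imported,
# as the function above requires):
if __name__ == "__main__":
    _img = np.zeros((50, 100, 3), dtype=np.uint8)   # 50 x 100 input image
    _out = _resize_and_pad(_img, 64)
    assert _out.shape == (64, 64, 3)                # resized and padded to a square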
| 5,347,755 |
def _terminal_size(fallback: Tuple[int, int]) -> Tuple[int, int]:
"""
Try to get the size of the terminal window.
If it fails, the passed fallback will be returned.
"""
for i in (0, 1):
try:
window_width = os.get_terminal_size(i)
return cast(Tuple[int, int], tuple(window_width))
except OSError:
continue
return fallback
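# Example usage (illustrative): falls back to 80x24 when no terminal is
# attached, e.g. when output is piped or running under a CI job.
if __name__ == "__main__":
    width, height = _terminal_size((80, 24))
    print(f"rendering at {width}x{height}")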
| 5,347,756 |
def dropout(x, name=None):
"""
    Compute a new tensor with `dropoutRate` percent set to zero. The values
that are set to zero are randomly chosen. This is commonly used to prevent
overfitting during the training process.
The output tensor has the same shape as `x`, but with `dropoutRate` of the
elements set to zero (droped out).
Args:
x: source tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Dropout
op = Dropout(x, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
| 5,347,757 |
def mol2graph(crystal_batch: CrystalDataset, args: Namespace) -> BatchMolGraph:
"""
Converts a list of SMILES strings to a BatchMolGraph containing the batch of molecular graphs.
:param crystal_batch: a list of CrystalDataset
:param args: Arguments.
:return: A BatchMolGraph containing the combined molecular graph for the molecules
"""
crystal_graphs = list()
for crystal_point in crystal_batch:
if crystal_point in CRYSTAL_TO_GRAPH.keys():
crystal_graph = CRYSTAL_TO_GRAPH[crystal_point]
else:
crystal_graph = MolGraph(crystal_point, args)
if not args.no_cache and len(CRYSTAL_TO_GRAPH) <= 10000:
CRYSTAL_TO_GRAPH[crystal_point] = crystal_graph
crystal_graphs.append(crystal_graph)
return BatchMolGraph(crystal_graphs, args)
| 5,347,758 |
def _compile(s: str):
"""compiles string into AST.
:param s: string to be compiled into AST.
:type s: str
"""
return compile(
source = s,
filename = '<unknown>',
mode = 'eval',
flags = ast.PyCF_ONLY_AST,
)
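# Example usage (illustrative): the PyCF_ONLY_AST flag makes compile() return
# an AST node instead of a code object, so the result can be inspected.
if __name__ == "__main__":
    tree = _compile("1 + 2")
    assert isinstance(tree, ast.Expression)
    assert isinstance(tree.body, ast.BinOp)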
| 5,347,759 |
def result_logger(result_dict, epoch_num, result_path='./results', model_name='model', make_dir=True):
"""
saves train results as .csv file
"""
log_path = result_path + '/logs'
file_name = model_name + '_results.csv'
directory_setter(log_path, make_dir)
save_path = os.path.join(log_path, file_name)
header = ','.join(result_dict.keys()) + '\n'
with open(save_path, 'w') as f:
f.write(header)
for i in range(epoch_num):
row = []
for item in result_dict.values():
if type(item) is not list:
row.append('')
elif item[i][1] is not None:
assert item[i][0] == (i+1), 'Not aligned epoch indices'
elem = round(item[i][1], 5)
row.append(str(elem))
else:
row.append('')
# write each row
f.write(','.join(row) + '\n')
sep = len(result_dict.keys()) - 2
f.write(','*sep + '%0.5f, %0.5f'% (result_dict['test_loss'], result_dict['test_acc']))
    print("results are logged at: '%s'" % save_path)
| 5,347,760 |
def rename_storms(
top_input_dir_name, first_date_unix_sec, last_date_unix_sec,
first_id_number, max_dropout_time_seconds, top_output_dir_name):
"""Renames storms. This ensures that all storm IDs are unique.
:param top_input_dir_name: Name of top-level directory with input files
(processed probSevere files, readable by `storm_tracking_io.read_file`).
:param first_date_unix_sec: First date in time period. This method will fix
IDs for all dates from `first_date_unix_sec`...`last_date_unix_sec`.
:param last_date_unix_sec: See above.
:param first_id_number: Will start with this ID.
:param max_dropout_time_seconds: Max dropout time. For each storm ID "s"
found in the original data, this method will find all periods where "s"
appears in consecutive time steps with no dropout longer than
`max_dropout_time_seconds`. Each such period will get a new, unique
storm ID.
:param top_output_dir_name: Name of top-level directory for output files
(files with new IDs, to be written by `storm_tracking_io.write_file`).
"""
error_checking.assert_is_integer(first_id_number)
error_checking.assert_is_geq(first_id_number, 0)
error_checking.assert_is_integer(max_dropout_time_seconds)
error_checking.assert_is_greater(max_dropout_time_seconds, 0)
(input_file_names_by_date, output_file_names_by_date,
valid_times_by_date_unix_sec
) = _find_io_files_for_renaming(
top_input_dir_name=top_input_dir_name,
first_date_unix_sec=first_date_unix_sec,
last_date_unix_sec=last_date_unix_sec,
top_output_dir_name=top_output_dir_name)
num_dates = len(input_file_names_by_date)
storm_object_table_by_date = [None] * num_dates
next_id_number = first_id_number + 0
for i in range(num_dates):
date_needed_indices = _get_dates_needed_for_renaming_storms(
working_date_index=i, num_dates_in_period=num_dates)
storm_object_table_by_date = _shuffle_io_for_renaming(
input_file_names_by_date=input_file_names_by_date,
output_file_names_by_date=output_file_names_by_date,
valid_times_by_date_unix_sec=valid_times_by_date_unix_sec,
storm_object_table_by_date=storm_object_table_by_date,
working_date_index=i)
concat_storm_object_table = pandas.concat(
[storm_object_table_by_date[j] for j in date_needed_indices],
axis=0, ignore_index=True
)
concat_storm_object_table, next_id_number = _rename_storms_one_table(
storm_object_table=concat_storm_object_table,
next_id_number=next_id_number,
max_dropout_time_seconds=max_dropout_time_seconds,
working_date_index=i)
for j in date_needed_indices:
storm_object_table_by_date[j] = concat_storm_object_table.loc[
concat_storm_object_table[DATE_INDEX_KEY] == j
]
_shuffle_io_for_renaming(
input_file_names_by_date=input_file_names_by_date,
output_file_names_by_date=output_file_names_by_date,
valid_times_by_date_unix_sec=valid_times_by_date_unix_sec,
storm_object_table_by_date=storm_object_table_by_date,
working_date_index=None)
| 5,347,761 |
def test_mark_dirty_fat32():
"""Test that _mark_dirty is able to mark partition as dirty."""
pf = PyFat()
pf.fat_type = pf.FAT_TYPE_FAT32
fat_orig = [0xFFFFFF8, 0xFFFFFFF]
pf.fat = list(fat_orig)
pf.fat_header = {"BS_Reserved1": 0x0}
with mock.patch('pyfatfs.PyFat.PyFat.flush_fat') as ff:
with mock.patch('pyfatfs.PyFat.PyFat._write_fat_header'):
pf._mark_dirty()
assert pf.fat[1] == 0x7FFFFFF
assert pf.fat_header["BS_Reserved1"] == 0x1
assert ff.call_count == 1
| 5,347,762 |
def temp_get_users_with_permission_form(self):
"""Used to test that swapping the Form method works"""
# Search string: ABC
return ()
| 5,347,763 |
def create_resource_types(raml_data, root):
"""
Parse resourceTypes into ``ResourceTypeNode`` objects.
:param dict raml_data: Raw RAML data
:param RootNode root: Root Node
:returns: list of :py:class:`.raml.ResourceTypeNode` objects
"""
# TODO: move this outside somewhere - config?
accepted_methods = root.config.get("http_optional")
#####
# Helper functions
#####
def get_union(resource, method, inherited):
union = {}
for key, value in list(iteritems(inherited)):
if resource.get(method) is not None:
if key not in list(iterkeys(resource.get(method, {}))):
union[key] = value
else:
resource_values = resource.get(method, {}).get(key)
inherited_values = inherited.get(key, {})
union[key] = dict(list(iteritems(resource_values)) +
list(iteritems(inherited_values)))
if resource.get(method) is not None:
for key, value in list(iteritems(resource.get(method, {}))):
if key not in list(iterkeys(inherited)):
union[key] = value
return union
def get_inherited_resource(res_name):
for resource in resource_types:
if res_name == list(iterkeys(resource))[0]:
return resource
def get_inherited_type(root, resource, type, raml):
inherited = get_inherited_resource(type)
res_type_objs = []
for key, value in list(iteritems(resource)):
for i in list(iterkeys(value)):
if i in accepted_methods:
data_union = get_union(
value, i, list(itervalues(inherited))[0].get(i, {})
)
# res = wrap(key, data_union, i)
res = ResourceTypeNode(
name=key,
raw=data_union,
root=root,
headers=headers(data_union.get("headers", {})),
body=body(data_union.get("body", {})),
responses=responses(data_union),
uri_params=uri_params(data_union),
base_uri_params=base_uri_params(data_union),
query_params=query_params(data_union),
form_params=form_params(data_union),
media_type=media_type(),
desc=description(),
type=type_(),
method=method(i),
usage=usage(),
optional=optional(),
is_=is_(data_union),
traits=traits(data_union),
secured_by=secured_by(data_union),
security_schemes=security_schemes(data_union),
display_name=display_name(data_union, key),
protocols=protocols(data_union)
)
res_type_objs.append(res)
return res_type_objs
def get_scheme(item):
schemes = raml_data.get("securitySchemes", [])
for s in schemes:
if item == list(iterkeys(s))[0]:
return s
def get_inherited_type_params(data, attribute, params):
inherited = get_inherited_resource(data.get("type"))
inherited = inherited.get(data.get("type"))
inherited_params = inherited.get(attribute, {})
return dict(list(iteritems(params)) +
list(iteritems(inherited_params)))
def get_attribute(res_data, method_data, item, default={}):
method_level = _get(method_data, item, default)
resource_level = _get(res_data, item, default)
return method_level, resource_level
def get_inherited_item(items, item_name):
inherited = get_inherited_resource(v.get("type"))
resource = inherited.get(v.get("type"))
res_level = resource.get(meth, {}).get(item_name, {})
method = resource.get(meth, {})
method_level = method.get(item_name, {})
items = dict(
list(iteritems(items)) +
list(iteritems(res_level)) +
list(iteritems(method_level))
)
return items
def get_attribute_dict(data, item):
resource_level = _get(v, item, {})
method_level = _get(data, item, {})
return dict(list(iteritems(resource_level)) +
list(iteritems(method_level)))
#####
# Set ResourceTypeNode attributes
#####
def display_name(data, name):
return data.get("displayName", name)
def headers(data):
_headers = _get(data, "headers", {})
if _get(v, "type"):
_headers = get_inherited_item(_headers, "headers")
header_objs = _create_base_param_obj(_headers, Header, root.config)
if header_objs:
for h in header_objs:
h.method = method(meth)
return header_objs
def body(data):
_body = _get(data, "body", default={})
if _get(v, "type"):
_body = get_inherited_item(_body, "body")
body_objects = []
for key, value in list(iteritems(_body)):
body = Body(
mime_type=key,
raw=value,
schema=load_schema(value.get("schema")),
example=load_schema(value.get("example")),
form_params=value.get("formParameters"),
config=root.config
)
body_objects.append(body)
return body_objects or None
def responses(data):
response_objects = []
_responses = _get(data, "responses", {})
if _get(v, "type"):
_responses = get_inherited_item(_responses, "responses")
for key, value in list(iteritems(_responses)):
_headers = data.get("responses", {}).get(key, {})
_headers = _get(_headers, "headers", {})
header_objs = _create_base_param_obj(_headers, Header, root.config)
if header_objs:
for h in header_objs:
h.method = method(meth)
response = Response(
code=key,
raw={key: value},
desc=_get(value, "description"),
headers=header_objs,
body=body(value),
config=root.config,
method=method(meth)
)
response_objects.append(response)
if response_objects:
return sorted(response_objects, key=lambda x: x.code)
return None
def uri_params(data):
uri_params = get_attribute_dict(data, "uriParameters")
if _get(v, "type"):
uri_params = get_inherited_type_params(v, "uriParameters",
uri_params)
return _create_base_param_obj(uri_params, URIParameter, root.config)
def base_uri_params(data):
uri_params = get_attribute_dict(data, "baseUriParameters")
return _create_base_param_obj(uri_params, URIParameter, root.config)
def query_params(data):
query_params = get_attribute_dict(data, "queryParameters")
if _get(v, "type"):
query_params = get_inherited_type_params(v, "queryParameters",
query_params)
return _create_base_param_obj(query_params, QueryParameter,
root.config)
def form_params(data):
form_params = get_attribute_dict(data, "formParameters")
if _get(v, "type"):
form_params = get_inherited_type_params(v, "formParameters",
form_params)
return _create_base_param_obj(form_params, FormParameter, root.config)
def media_type():
return _get(v, "mediaType")
def description():
return _get(v, "description")
def type_():
return _get(v, "type")
def method(meth):
if not meth:
return None
if "?" in meth:
return meth[:-1]
return meth
def usage():
return _get(v, "usage")
def optional():
if meth:
return "?" in meth
def protocols(data):
m, r = get_attribute(v, data, "protocols", None)
if m:
return m
return r
def is_(data):
m, r = get_attribute(v, data, "is", default=[])
return m + r or None
def get_trait(item):
traits = raml_data.get("traits", [])
for t in traits:
if item == list(iterkeys(t))[0]:
return t
# TODO: clean up
def traits(data):
assigned = is_(data)
if assigned:
trait_objs = []
for item in assigned:
assigned_trait = get_trait(item)
raw_data = list(itervalues(assigned_trait))[0]
trait = TraitNode(
name=list(iterkeys(assigned_trait))[0],
raw=raw_data,
root=root,
headers=headers(raw_data),
body=body(raw_data),
responses=responses(raw_data),
uri_params=uri_params(raw_data),
base_uri_params=base_uri_params(raw_data),
query_params=query_params(raw_data),
form_params=form_params(raw_data),
media_type=media_type(),
desc=description(),
usage=usage(),
protocols=protocols(raw_data)
)
trait_objs.append(trait)
return trait_objs
return None
def secured_by(data):
m, r = get_attribute(v, data, "securedBy", [])
return m + r or None
def security_schemes(data):
secured = secured_by(data)
if secured:
secured_objs = []
for item in secured:
assigned_scheme = get_scheme(item)
raw_data = list(itervalues(assigned_scheme))[0]
scheme = SecurityScheme(
name=list(iterkeys(assigned_scheme))[0],
raw=raw_data,
type=raw_data.get("type"),
described_by=raw_data.get("describedBy"),
desc=raw_data.get("description"),
settings=raw_data.get("settings"),
config=root.config
)
secured_objs.append(scheme)
return secured_objs
return None
def wrap(key, data, meth, v):
return ResourceTypeNode(
name=key,
raw=data,
root=root,
headers=headers(data),
body=body(data),
responses=responses(data),
uri_params=uri_params(data),
base_uri_params=base_uri_params(data),
query_params=query_params(data),
form_params=form_params(data),
media_type=media_type(),
desc=description(),
type=type_(),
method=method(meth),
usage=usage(),
optional=optional(),
is_=is_(data),
traits=traits(data),
secured_by=secured_by(data),
security_schemes=security_schemes(data),
display_name=display_name(data, key),
protocols=protocols(data)
)
resource_types = raml_data.get("resourceTypes", [])
resource_type_objects = []
for res in resource_types:
for k, v in list(iteritems(res)):
if isinstance(v, dict):
if "type" in list(iterkeys(v)):
r = get_inherited_type(root, res, v.get("type"), raml_data)
resource_type_objects.extend(r)
else:
for meth in list(iterkeys(v)):
if meth in accepted_methods:
method_data = v.get(meth, {})
resource = wrap(k, method_data, meth, v)
resource_type_objects.append(resource)
else:
meth = None
resource = wrap(k, {}, meth, v)
resource_type_objects.append(resource)
return resource_type_objects or None
| 5,347,764 |
def plot_roc(y_test, y_score, title=None,n_classes=3,lw=2):
"""
    plot ROC curves for classification scores y_score against true labels y_test
Parameters
----------
y_test: true class for test set
y_score: classification score
title: to save as eps
Default: None (only show but not save it)
n_classes: int
The number of classes in total
lw: int
plot line width
"""
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC ({0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC ({0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
classes = {0: "BPD", 1: "HC", 2: "BD"}[i]
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC of ' +classes+' ({1:0.2f})'
''.format(i,roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic for '+title)
# plt.legend(loc="lower right", bbox_to_anchor=(1.8, 0.5))
plt.legend(loc="lower right")
    if title is None:
plt.show()
else:
plt.savefig('ROC_for_'+title+'.eps')
| 5,347,765 |
def verify_inclusion(position, R, geometries_nodes, allNodes, ax):
"""
Quick function, with drawing, to verify if a particle is inside or not.
"""
h = np.linspace(0, 1, 100)
ax.plot(position[0] + 0.5*R*np.cos(2*np.pi*h),
position[1] + 0.5*R*np.sin(2*np.pi*h),
color='cyan', linestyle='solid')
ax.plot(position[0], position[1], marker='.', color='cyan')
draw_geometries(geometries_nodes, allNodes, ax)
inside = is_in_geometries(position, R, geometries_nodes, allNodes)
print('Q: is it inside?\nA: %s' % inside[0])
print('Q: in which geometry?\nA: %s' % inside[1])
| 5,347,766 |
def permute_images(images, permutation_index):
"""
Permute pixels in all images.
:param images: numpy array of images
:param permutation_index: index of the permutation (#permutations = #tasks - 1)
:return: numpy array of permuted images (of the same size)
"""
# seed = np.random.randint(low=4294967295, dtype=np.uint32) # make a random seed for all images in an array
# baseline and superposition have the same permutation of images for the corresponding task
global seeds
seed = seeds[permutation_index] # the same permutation each run for the first, second, ... task
return np.array([permute_pixels(im, seed) for im in images])
| 5,347,767 |
def generate_random_urdf_box(save_path, how_many):
"""
    generate `how_many` boxes - to change the limits of dimensions modify $rand_box_shape$
source: http://wiki.ros.org/urdf/Tutorials/Adding%20Physical%20and%20Collision%20Properties%20to%20a%20URDF%20Model
"""
scale = 0.03
cube_height = 0.03
gripper_width_limit = 0.055
for i in range(how_many):
rand_box_shape = (random.uniform(0.01, gripper_width_limit), random.uniform(0.01, gripper_width_limit), cube_height)
size_dict = {"size": str(rand_box_shape[0]) + " " + str(rand_box_shape[1]) + " " + str(rand_box_shape[2])}
print("size_dict", size_dict)
color_dict = {"rgba": str(random.uniform(0, 1)) + " " + str(random.uniform(0, 1)) + " " + str(random.uniform(0, 1))+ " 1"}
robot_et = et.Element('robot', {'name': 'random_box'})
# link name="box"
link_et = et.SubElement(robot_et, 'link', {'name': 'box'})
visual_et = et.SubElement(link_et, 'visual')
geometry_et = et.SubElement(visual_et, 'geometry')
box_et = et.SubElement(geometry_et, 'box', size_dict )
material_et = et.SubElement(visual_et, 'material', {"name": "color"})
color_et = et.SubElement(material_et, 'color', color_dict)
collision_et = et.SubElement(link_et, 'collision')
geometry_col_et = et.SubElement(collision_et, 'geometry')
box_col_et = et.SubElement(geometry_col_et, 'box', size_dict)
tree = et.ElementTree(robot_et)
tree.write(save_path + 'box_' + str(i) + '.urdf', pretty_print=True, xml_declaration=True, encoding="utf-8")
| 5,347,768 |
def atanh(x) -> float:
"""
Return the inverse hyperbolic tangent of ``x``.
"""
...
| 5,347,769 |
def new_user(request, id):
"""
Page for creating users after registering a person.
person must be either volunteer, NGO employee or Government
"""
msg = ''
password = ''
try:
person_id = int(id)
# Get Name
user = RegPerson.objects.get(pk=person_id)
personfname = user.first_name
personsname = user.surname
names = user.full_name
if request.method == 'POST':
form = NewUser(user, data=request.POST)
username = request.POST.get('username')
password1 = request.POST.get('password1')
password2 = request.POST.get('password2')
# resolve existing account
user_exists = AppUser.objects.filter(reg_person=person_id)
if user_exists:
msg = 'Person ({} {}) has an existing user account.'.format(
personfname, personsname)
messages.add_message(request, messages.INFO, msg)
return HttpResponseRedirect(reverse(persons_search))
if password1 == password2:
password = password1
else:
msg = 'Passwords do not match!'
messages.add_message(request, messages.INFO, msg)
form = NewUser(user, data=request.POST)
return render(request, 'registry/new_user.html',
{'form': form}, )
            # check whether the requested username is already taken
username_exists = AppUser.objects.filter(username__iexact=username)
if username_exists:
msg = 'Username ({}) is taken. Pick another one.'.format(
username)
messages.add_message(request, messages.INFO, msg)
form = NewUser(user, data=request.POST)
return render(request, 'registry/new_user.html',
{'form': form}, )
else:
# Create User
user = AppUser.objects.create_user(username=username,
reg_person=person_id,
password=password)
if user:
user.groups.add(Group.objects.get(
name='Standard logged in'))
# Capture msg & op status
msg = 'User ({}) save success.'.format(username)
messages.add_message(request, messages.INFO, msg)
return HttpResponseRedirect(
'%s?id=%d' % (reverse(persons_search), int(person_id)))
else:
form = NewUser(user)
return render(request, 'registry/new_user.html',
{'names': names, 'form': form}, )
except Exception as e:
msg = 'Error - ({}) '.format(str(e))
messages.add_message(request, messages.ERROR, msg)
return HttpResponseRedirect(reverse(persons_search))
| 5,347,770 |
def transformAndSaveResultsAsTiff(filename, segmentation, predictions, transform):
""" Saves results as tiff:
segmentation : name of the segmentation
        filename: full filename with the extension
        image_file : file of the size for the classification
"""
csvfilename, npz_filename, image_file_green, _, _ = get_files(segmentation)
# get file info
im_info = tif.TiffFile(image_file_green)
series = im_info.series[0]
# create results as volume
centroids_tiff, labels = get_centroids_and_labels(csvfilename, npz_filename)
centroids_tiff = applyTransform(centroids_tiff, transform)
labels = labels.astype(bool)
# all available choises
# volume = np.zeros(series.shape,dtype = np.int16)
# volume = make_cube(volume, centroids_tiff, 1)
# tif.imsave(f"{filename}_all_local_max.tif", volume, photometric='minisblack')
# print("did all available choises")
# human
# volume = np.zeros(series.shape,dtype = np.int16)
# volume = make_cube(volume, centroids_tiff[labels], 1)
# tif.imsave(f"{filename}_human_grader.tif", volume, photometric='minisblack')
# print("did human")
# all predictions
volume = np.zeros(series.shape, dtype=np.int16)
volume = make_cube(volume, centroids_tiff[predictions], 1)
tif.imsave(f"{filename}_all.tif", volume, photometric='minisblack')
print("did all predictions")
# all correct predictions
correct = np.logical_and(predictions, labels)
volume = np.zeros(series.shape, dtype=np.int16)
volume = make_cube(volume, centroids_tiff[correct], 1)
tif.imsave(f"{filename}_correct.tif", volume, photometric='minisblack')
print("did all correct predictions")
# false positive
false_pos = np.logical_and(predictions, np.logical_not(labels))
volume = np.zeros(series.shape, dtype=np.int16)
volume = make_cube(volume, centroids_tiff[false_pos], 1)
tif.imsave(f"{filename}_false_pos.tif", volume, photometric='minisblack')
print("did all false positives")
# false negative
false_neg = np.logical_and(labels, np.logical_not(predictions))
volume = np.zeros(series.shape, dtype=np.int16)
volume = make_cube(volume, centroids_tiff[false_neg], 1)
tif.imsave(f"{filename}_false_neg.tif", volume, photometric='minisblack')
print("did all false negatives")
# TODO : make similar only for the classifier and humans separately
| 5,347,771 |
def multi_conv(func=None, options=None):
"""A function decorator for generating multi-convolution operations.
Multi-convolutions allow for a set of data-independent convolutions to be
executed in parallel. Executing convolutions in parallel can lead to an
increase in the data throughput.
The ``multi_conv`` function decorator is a convenient way to generate
multi-convolutions - it detects all the convolution operations inside of the
decorated function and executes them in parallel.
For example:
.. code-block:: python
from tensorflow import keras
from tensorflow.python import ipu
@ipu.nn_ops.multi_conv
def convs(x, y, z):
x = keras.layers.DepthwiseConv2D(8, 2, depth_multiplier=2)(x)
y = keras.layers.DepthwiseConv2D(16, 4, depth_multiplier=2)(y)
z = keras.layers.Conv2D(8, 3)(z)
return x, y, z
Will detect and execute the three convolutions ``x``, ``y`` and ``z`` in
parallel.
Note that any operations which are not convolutions, such as bias add
operations, will be executed in the same way as if they were not inside of a
``multi_conv`` decorated function.
It is also possible to set PopLibs multi-convolution options using this
decorator.
For example:
.. code-block:: python
from tensorflow import keras
from tensorflow.python import ipu
@ipu.nn_ops.multi_conv(options={"perConvReservedTiles":"50"})
def convs(x, y, z):
x = keras.layers.DepthwiseConv2D(8, 2, depth_multiplier=2)(x)
y = keras.layers.DepthwiseConv2D(16, 4, depth_multiplier=2)(y)
z = keras.layers.Conv2D(8, 3)(z)
return x, y, z
    See the PopLibs documentation for the list of all available flags.
Note that these options will also be applied to the gradient operations
generated during backpropagation.
Args:
func: A python function which takes a list of positional arguments only. All
the arguments must be `tf.Tensor`-like objects, or be convertible to them.
The function provided must return at least one `tf.Tensor`-like object.
options: A dictionary of Poplar option flags for multi-convolution. See the
multi-convolution PopLibs documentation for available flags.
"""
def decorated(inner_func):
def multi_conv_wrapper(*args):
inner_options = options if options else {}
if not isinstance(inner_options, dict):
raise TypeError(
"Expected the multi_conv `options` to be a `dict`, but got %s "
"instead." % (str(inner_options)))
option_proto = option_flag_pb2.PoplarOptionFlags()
for key, value in inner_options.items():
flag = option_proto.flags.add()
flag.option = key
flag.value = value
def func_wrapper(*args):
with ops.get_default_graph().as_default() as g:
with g.gradient_override_map(_gradient_override_map):
return inner_func(*args)
args = functional_ops._convert_to_list(args) # pylint: disable=protected-access
with ops.name_scope("multi_conv") as scope:
func_graph, captured_args = functional_ops._compile_function( # pylint: disable=protected-access
func_wrapper,
args,
scope, [],
allow_external_captures=True)
with ops.control_dependencies(list(func_graph.control_captures)):
outputs = gen_functional_ops.multi_conv(
captured_args,
to_apply=util.create_new_tf_function(func_graph),
Tout=func_graph.output_types,
output_shapes=func_graph.output_shapes,
option_flags=json_format.MessageToJson(option_proto))
return func_graph_module.pack_sequence_as(func_graph.structured_outputs,
outputs)
return multi_conv_wrapper
if func is not None:
return decorated(func)
return decorated
| 5,347,772 |
def use_fixture(obj, name, force):
"""Switch to fixture with given name."""
if not force:
click.confirm(
f'Are you sure you want to change the database to fixture "{name}"?',
abort=True,
)
api = lib.get_api(**obj)
poll, NoTaskResultYet = api.dbctl_action("use_fixture", dict(name=name))
lib.log(f"Requested change to fixture {name}.")
lib.log("Please verify by other means (e.g. look at the logs).")
| 5,347,773 |
def hi_joseangel():
""" Hi Jose Angel Function """
return "hi joseangel!"
| 5,347,774 |
def test_as_meg_type_evoked():
"""Test interpolation of data on to virtual channels."""
# validation tests
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
# channel names
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
# pick from and to channels
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
# set up things
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
# test cross dots
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
# correlation test
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[0, 1] > 0.95)
# Do it with epochs
virt_epochs = \
epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
| 5,347,775 |
def main():
"""Main function call to test the shortest_path function."""
test(shortest_path)
| 5,347,776 |
def received_date_date(soup):
"""
    Find the received date in human-readable form
"""
received_date = get_history_date(soup, date_type = "received")
date_string = None
try:
date_string = time.strftime("%B %d, %Y", received_date)
    except TypeError:
# Date did not convert
pass
return date_string
| 5,347,777 |
def is_on(hass, entity_id):
""" Returns if the group state is in its ON-state. """
state = hass.states.get(entity_id)
if state:
group_type = _get_group_type(state.state)
# If we found a group_type, compare to ON-state
return group_type and state.state == _GROUP_TYPES[group_type][0]
return False
| 5,347,778 |
def pytest_report_header(config, startdir):
"""return a string to be displayed as header info for terminal reporting."""
capabilities = config.getoption('capabilities')
if capabilities:
return 'capabilities: {0}'.format(capabilities)
| 5,347,779 |
def chessboard_distance(x_a, y_a, x_b, y_b):
"""
    Compute the chessboard (Chebyshev) distance between
point (x_a,y_a) and (x_b, y_b)
"""
return max(abs(x_b-x_a),abs(y_b-y_a))
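# Worked example (illustrative): a king moving from (0, 0) to (3, 5) needs
# max(|3|, |5|) = 5 moves, unlike the Manhattan distance of 3 + 5 = 8.
if __name__ == "__main__":
    assert chessboard_distance(0, 0, 3, 5) == 5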
| 5,347,780 |
def bayesnet():
"""
References:
https://class.coursera.org/pgm-003/lecture/17
http://www.cs.ubc.ca/~murphyk/Bayes/bnintro.html
http://www3.cs.stonybrook.edu/~sael/teaching/cse537/Slides/chapter14d_BP.pdf
http://www.cse.unsw.edu.au/~cs9417ml/Bayes/Pages/PearlPropagation.html
https://github.com/pgmpy/pgmpy.git
http://pgmpy.readthedocs.org/en/latest/
http://nipy.bic.berkeley.edu:5000/download/11
"""
# import operator as op
# # Enumerate all possible events
# varcard_list = list(map(op.attrgetter('variable_card'), cpd_list))
# _esdat = list(ut.iprod(*map(range, varcard_list)))
# _escol = list(map(op.attrgetter('variable'), cpd_list))
# event_space = pd.DataFrame(_esdat, columns=_escol)
# # Custom compression of event space to inspect a specific graph
# def compress_space_flags(event_space, var1, var2, var3, cmp12_):
# """
# var1, var2, cmp_ = 'Lj', 'Lk', op.eq
# """
# import vtool as vt
# data = event_space
# other_cols = ut.setdiff_ordered(data.columns.tolist(), [var1, var2, var3])
# case_flags12 = cmp12_(data[var1], data[var2]).values
# # case_flags23 = cmp23_(data[var2], data[var3]).values
# # case_flags = np.logical_and(case_flags12, case_flags23)
# case_flags = case_flags12
# case_flags = case_flags.astype(np.int64)
# subspace = np.hstack((case_flags[:, None], data[other_cols].values))
# sel_ = vt.unique_row_indexes(subspace)
# flags = np.logical_and(mask, case_flags)
# return flags
# # Build special cases
# case_same = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.eq)]
# case_diff = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.ne)]
# special_cases = [
# case_same,
# case_diff,
# ]
from pgmpy.factors import TabularCPD
from pgmpy.models import BayesianModel
import pandas as pd
from pgmpy.inference import BeliefPropagation # NOQA
from pgmpy.inference import VariableElimination # NOQA
name_nice = ['n1', 'n2', 'n3']
score_nice = ['low', 'high']
match_nice = ['diff', 'same']
num_names = len(name_nice)
num_scores = len(score_nice)
nid_basis = list(range(num_names))
score_basis = list(range(num_scores))
semtype2_nice = {
'score': score_nice,
'name': name_nice,
'match': match_nice,
}
var2_cpd = {
}
globals()['semtype2_nice'] = semtype2_nice
globals()['var2_cpd'] = var2_cpd
name_combo = np.array(list(ut.iprod(nid_basis, nid_basis)))
combo_is_same = name_combo.T[0] == name_combo.T[1]
def get_expected_scores_prob(level1, level2):
part1 = combo_is_same * level1
part2 = (1 - combo_is_same) * (1 - (level2))
expected_scores_level = part1 + part2
return expected_scores_level
# def make_cpd():
def name_cpd(aid):
from pgmpy.factors import TabularCPD
cpd = TabularCPD(
variable='N' + aid,
variable_card=num_names,
values=[[1.0 / num_names] * num_names])
cpd.semtype = 'name'
return cpd
name_cpds = [name_cpd('i'), name_cpd('j'), name_cpd('k')]
var2_cpd.update(dict(zip([cpd.variable for cpd in name_cpds], name_cpds)))
if True:
num_same_diff = 2
samediff_measure = np.array([
# get_expected_scores_prob(.12, .2),
# get_expected_scores_prob(.88, .8),
get_expected_scores_prob(0, 0),
get_expected_scores_prob(1, 1),
])
samediff_vals = (samediff_measure / samediff_measure.sum(axis=0)).tolist()
def samediff_cpd(aid1, aid2):
cpd = TabularCPD(
variable='A' + aid1 + aid2,
variable_card=num_same_diff,
values=samediff_vals,
evidence=['N' + aid1, 'N' + aid2], # [::-1],
evidence_card=[num_names, num_names]) # [::-1])
cpd.semtype = 'match'
return cpd
samediff_cpds = [samediff_cpd('i', 'j'), samediff_cpd('j', 'k'), samediff_cpd('k', 'i')]
var2_cpd.update(dict(zip([cpd.variable for cpd in samediff_cpds], samediff_cpds)))
if True:
def score_cpd(aid1, aid2):
semtype = 'score'
evidence = ['A' + aid1 + aid2, 'N' + aid1, 'N' + aid2]
evidence_cpds = [var2_cpd[key] for key in evidence]
evidence_nice = [semtype2_nice[cpd.semtype] for cpd in evidence_cpds]
evidence_card = list(map(len, evidence_nice))
evidence_states = list(ut.iprod(*evidence_nice))
variable_basis = semtype2_nice[semtype]
variable_values = []
for mystate in variable_basis:
row = []
for state in evidence_states:
if state[0] == state[1]:
if state[2] == 'same':
val = .2 if mystate == 'low' else .8
else:
val = 1
# val = .5 if mystate == 'low' else .5
elif state[0] != state[1]:
if state[2] == 'same':
val = .5 if mystate == 'low' else .5
else:
val = 1
# val = .9 if mystate == 'low' else .1
row.append(val)
variable_values.append(row)
cpd = TabularCPD(
variable='S' + aid1 + aid2,
variable_card=len(variable_basis),
values=variable_values,
evidence=evidence, # [::-1],
evidence_card=evidence_card) # [::-1])
cpd.semtype = semtype
return cpd
else:
score_values = [
[.8, .1],
[.2, .9],
]
def score_cpd(aid1, aid2):
cpd = TabularCPD(
variable='S' + aid1 + aid2,
variable_card=num_scores,
values=score_values,
evidence=['A' + aid1 + aid2], # [::-1],
evidence_card=[num_same_diff]) # [::-1])
cpd.semtype = 'score'
return cpd
score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')]
cpd_list = name_cpds + score_cpds + samediff_cpds
else:
score_measure = np.array([get_expected_scores_prob(level1, level2)
for level1, level2 in
zip(np.linspace(.1, .9, num_scores),
np.linspace(.2, .8, num_scores))])
score_values = (score_measure / score_measure.sum(axis=0)).tolist()
def score_cpd(aid1, aid2):
cpd = TabularCPD(
variable='S' + aid1 + aid2,
variable_card=num_scores,
values=score_values,
evidence=['N' + aid1, 'N' + aid2],
evidence_card=[num_names, num_names])
cpd.semtype = 'score'
return cpd
score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')]
cpd_list = name_cpds + score_cpds
pass
input_graph = []
for cpd in cpd_list:
if cpd.evidence is not None:
for evar in cpd.evidence:
input_graph.append((evar, cpd.variable))
name_model = BayesianModel(input_graph)
name_model.add_cpds(*cpd_list)
var2_cpd.update(dict(zip([cpd.variable for cpd in cpd_list], cpd_list)))
globals()['var2_cpd'] = var2_cpd
varnames = [cpd.variable for cpd in cpd_list]
# --- PRINT CPDS ---
cpd = score_cpds[0]
def print_cpd(cpd):
print('CPT: %r' % (cpd,))
index = semtype2_nice[cpd.semtype]
if cpd.evidence is None:
columns = ['None']
else:
basis_lists = [semtype2_nice[var2_cpd[ename].semtype] for ename in cpd.evidence]
columns = [','.join(x) for x in ut.iprod(*basis_lists)]
data = cpd.get_cpd()
print(pd.DataFrame(data, index=index, columns=columns))
for cpd in name_model.get_cpds():
print('----')
print(cpd._str('phi'))
print_cpd(cpd)
# --- INFERENCE ---
Ni = name_cpds[0]
event_space_combos = {}
event_space_combos[Ni.variable] = 0 # Set ni to always be Fred
for cpd in cpd_list:
if cpd.semtype == 'score':
event_space_combos[cpd.variable] = list(range(cpd.variable_card))
evidence_dict = ut.all_dict_combinations(event_space_combos)
# Query about name of annotation k given different event space params
def pretty_evidence(evidence):
return [key + '=' + str(semtype2_nice[var2_cpd[key].semtype][val])
for key, val in evidence.items()]
def print_factor(factor):
row_cards = factor.cardinality
row_vars = factor.variables
values = factor.values.reshape(np.prod(row_cards), 1).flatten()
# col_cards = 1
# col_vars = ['']
basis_lists = list(zip(*list(ut.iprod(*[range(c) for c in row_cards]))))
nice_basis_lists = []
for varname, basis in zip(row_vars, basis_lists):
cpd = var2_cpd[varname]
_nice_basis = ut.take(semtype2_nice[cpd.semtype], basis)
nice_basis = ['%s=%s' % (varname, val) for val in _nice_basis]
nice_basis_lists.append(nice_basis)
row_lbls = [', '.join(sorted(x)) for x in zip(*nice_basis_lists)]
print(ut.repr3(dict(zip(row_lbls, values)), precision=3, align=True, key_order_metric='-val'))
# name_belief = BeliefPropagation(name_model)
name_belief = VariableElimination(name_model)
import pgmpy
import six # NOQA
def try_query(evidence):
print('--------')
query_vars = ut.setdiff_ordered(varnames, list(evidence.keys()))
evidence_str = ', '.join(pretty_evidence(evidence))
probs = name_belief.query(query_vars, evidence)
factor_list = probs.values()
joint_factor = pgmpy.factors.factor_product(*factor_list)
print('P(' + ', '.join(query_vars) + ' | ' + evidence_str + ')')
# print(six.text_type(joint_factor))
factor = joint_factor # NOQA
# print_factor(factor)
# import utool as ut
print(ut.hz_str([(f._str(phi_or_p='phi')) for f in factor_list]))
for evidence in evidence_dict:
try_query(evidence)
evidence = {'Aij': 1, 'Ajk': 1, 'Aki': 1, 'Ni': 0}
try_query(evidence)
evidence = {'Aij': 0, 'Ajk': 0, 'Aki': 0, 'Ni': 0}
try_query(evidence)
globals()['score_nice'] = score_nice
globals()['name_nice'] = name_nice
globals()['score_basis'] = score_basis
globals()['nid_basis'] = nid_basis
print('Independencies')
print(name_model.get_independencies())
print(name_model.local_independencies([Ni.variable]))
# name_belief = BeliefPropagation(name_model)
# # name_belief = VariableElimination(name_model)
# for case in special_cases:
# test_data = case.drop('Lk', axis=1)
# test_data = test_data.reset_index(drop=True)
# print('----')
# for i in range(test_data.shape[0]):
# evidence = test_data.loc[i].to_dict()
# probs = name_belief.query(['Lk'], evidence)
# factor = probs['Lk']
# probs = factor.values
# evidence_ = evidence.copy()
# evidence_['Li'] = name_nice[evidence['Li']]
# evidence_['Lj'] = name_nice[evidence['Lj']]
# evidence_['Sij'] = score_nice[evidence['Sij']]
# evidence_['Sjk'] = score_nice[evidence['Sjk']]
# nice2_prob = ut.odict(zip(name_nice, probs.tolist()))
# ut.print_python_code('P(Lk | {evidence}) = {cpt}'.format(
# evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)),
# cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val')
# ))
# for case in special_cases:
# test_data = case.drop('Lk', axis=1)
# test_data = test_data.drop('Lj', axis=1)
# test_data = test_data.reset_index(drop=True)
# print('----')
# for i in range(test_data.shape[0]):
# evidence = test_data.loc[i].to_dict()
# query_vars = ['Lk', 'Lj']
# probs = name_belief.query(query_vars, evidence)
# for queryvar in query_vars:
# factor = probs[queryvar]
# print(factor._str('phi'))
# probs = factor.values
# evidence_ = evidence.copy()
# evidence_['Li'] = name_nice[evidence['Li']]
# evidence_['Sij'] = score_nice[evidence['Sij']]
# evidence_['Sjk'] = score_nice[evidence['Sjk']]
# nice2_prob = ut.odict(zip([queryvar + '=' + x for x in name_nice], probs.tolist()))
# ut.print_python_code('P({queryvar} | {evidence}) = {cpt}'.format(
# query_var=query_var,
# evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)),
# cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val')
# ))
# _ draw model
import plottool as pt
import networkx as netx
fig = pt.figure() # NOQA
fig.clf()
ax = pt.gca()
netx_nodes = [(node, {}) for node in name_model.nodes()]
netx_edges = [(etup[0], etup[1], {}) for etup in name_model.edges()]
netx_graph = netx.DiGraph()
netx_graph.add_nodes_from(netx_nodes)
netx_graph.add_edges_from(netx_edges)
# pos = netx.graphviz_layout(netx_graph)
pos = netx.pydot_layout(netx_graph, prog='dot')
netx.draw(netx_graph, pos=pos, ax=ax, with_labels=True)
pt.plt.savefig('foo.png')
ut.startfile('foo.png')
| 5,347,781 |
def safe_makedirs(directory, mode=0o777):
"""Create a directory and all its parent directories, unless it already
exists.
"""
if not os.path.isdir(directory):
os.makedirs(directory, mode)
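# Example usage (illustrative): repeated calls are idempotent, so callers do
# not need to check for the directory first.
if __name__ == "__main__":
    import tempfile
    _target = os.path.join(tempfile.gettempdir(), "safe_makedirs_demo", "nested")
    safe_makedirs(_target)
    safe_makedirs(_target)  # second call is a no-op, no exception raised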
| 5,347,782 |
def command(task_id, tail, wip, limit):
"""
Use this command to show a task or all the tasks.
$ trackmywork show
id;time;project;category;links;started_at;finished_at
1;Starting a new task;2h;trackmywork;personal;;2018-08-11 14:41:39.584405;
2;Starting a second task;2h;trackmywork;personal;;2018-08-11 14:41:39.584405;
$ trackmywork show 1
id;time;project;category;links;started_at;finished_at
1;Starting a new task;2h;trackmywork;personal;;2018-08-11 14:41:39.584405;
$ trackmywork show --tail --limit 1
id;time;project;category;links;started_at;finished_at
2;Starting a new task;2h;trackmywork;personal;;2018-08-11 14:41:39.584405;
"""
if task_id:
task = storage.get_by_id(task_id)
if not task:
click.echo(f"Task {task_id} not found.")
sys.exit(1)
tasks = [task]
else:
tasks = storage.all(limit=limit, reverse=tail, wip=wip)
print_header()
for task in tasks:
show_task(task)
| 5,347,783 |
def sigma_R(sim, Pk=None, z=None, non_lin=False):
""" return amplitude of density fluctuations
if given Pk -- C++ class Extrap_Pk or Extrap_Pk_Nl -- computes its sigma_R.
if given redshift, computes linear or non-linear (emulator) amplitude of density fluctuations """
sigma = fs.Data_Vec_2()
if Pk: # compute amplitude of density fluctuations from given continuous power spectrum
fs.gen_sigma_binned_gsl_qawf(sim, Pk, sigma)
elif z is not None: # compute (non-)linear amplitude of density fluctuations
a = 1./(1.+z) if z != 'init' else 1.0
        if non_lin:
            # non-linear (emulator) amplitude of density fluctuations
            fs.gen_sigma_func_binned_gsl_qawf_nl(sim, a, sigma)
        else:
            # linear amplitude of density fluctuations
            fs.gen_sigma_func_binned_gsl_qawf_lin(sim, a, sigma)
else:
raise KeyError("Function 'sigma_R' called without arguments.")
return get_ndarray(sigma)
| 5,347,784 |
def test_displays_all_error_messages():
"""By default, ParseFixer stops on errors and outputs a message
listing all encountered errors."""
expected_error_msg = dedent(
"""\
Stopped parsing after 2 errors in table 'farm_cols1' with messages:
Duplicate column 'flt' at position 4 in table 'farm_cols1'.
Duplicate column 'flt' at position 5 in table 'farm_cols1'."""
)
with raises(InputError) as input_error:
blocks = list(read_csv(input_dir() / "cols1.csv"))
msg = input_error.value.args[0].issue # Avoid repr escaping
assert expected_error_msg == str(msg)
| 5,347,785 |
def session_ended_request_handler(handler_input):
"""Handler for Session End."""
# type: (HandlerInput) -> Response
logger.info("Entering AMAZON.SessionEndedRequest")
save_data(handler_input)
return handler_input.response_builder.response
| 5,347,786 |
def _getTestSuite(testFiles):
"""
Loads unit tests recursively from beneath the current directory.
Inputs: testFiles - If non-empty, a list of unit tests to selectively run.
Outputs: A unittest.TestSuite object containing the unit tests to run.
"""
loader = unittest.TestLoader()
if testFiles:
        return loader.loadTestsFromNames([".".join([TEST_DIR, testFile]) for testFile in testFiles])
return loader.discover(TEST_DIR)
| 5,347,787 |
def stratifiedsmooth2stratifiedwavy_c(rho_gas, rho_liq, vel_gas, d_m, beta, mu_liq, mu_gas):
"""
    Construct the boundary of the transition from the stratified-smooth to the stratified-wavy
    flow structure resulting from the "wind" effect.
:param rho_gas: gas density
:param rho_liq: liquid density
:param vel_gas: superficial gas velocity
:param d_m: pipe diameter
:param beta: angle of inclination from the horizontal
:param mu_liq: liquid viscosity
:param mu_gas: gas viscosity
:return: superficial liquid velocity
"""
froude_number = (rho_gas / (rho_liq - rho_gas)) ** 0.5 * vel_gas / (d_m * uc.g * np.cos(beta * uc.pi / 180)) ** 0.5
vel_liq_0 = 0.0000001
def equation2solve(vel_liq):
re_sl = reynolds_number(rho_liq, vel_liq, d_m, mu_liq)
k = froude_number * re_sl ** 0.5
# k = froude_number ** 2 * re_sl
x = parameter_x(d_m, rho_liq, rho_gas, mu_liq, mu_gas, vel_gas, vel_liq)
y = parameter_y(d_m, rho_liq, rho_gas, mu_gas, vel_gas, beta)
h_l = combined_momentum_equation(x, y, d_m, rho_liq, rho_gas, mu_liq, mu_gas, vel_gas, vel_liq)
variables = dimensionless_variables(h_l)
v_g = variables[6]
s = 0.01
v_l = variables[5]
equation = k - 2 / (v_l ** 0.5 * v_g * s ** 0.5)
return equation
vel_liq = opt.fsolve(equation2solve, np.array(vel_liq_0))
return vel_liq
| 5,347,788 |
def box(
data_frame=None,
x=None,
y=None,
color=None,
facet_row=None,
facet_row_weights=None,
facet_col=None,
facet_col_weights=None,
facet_col_wrap=0,
facet_row_spacing=None,
facet_col_spacing=None,
hover_name=None,
hover_data=None,
custom_data=None,
animation_frame=None,
animation_group=None,
category_orders=None,
labels=None,
color_discrete_sequence=None,
color_discrete_map=None,
orientation=None,
boxmode=None,
log_x=False,
log_y=False,
range_x=None,
range_y=None,
points=None,
notched=False,
title=None,
template=None,
width=None,
height=None,
):
"""
In a box plot, rows of `data_frame` are grouped together into a
box-and-whisker mark to visualize their distribution.
Each box spans from quartile 1 (Q1) to quartile 3 (Q3). The second
quartile (Q2) is marked by a line inside the box. By default, the
whiskers correspond to the box' edges +/- 1.5 times the interquartile
range (IQR: Q3-Q1), see "points" for other options.
"""
return make_figure(
args=locals(),
constructor=go.Box,
trace_patch=dict(boxpoints=points, notched=notched, x0=" ", y0=" "),
layout_patch=dict(boxmode=boxmode),
)
| 5,347,789 |
def main():
"""Run script."""
tic = perf_counter()
# fetch buzz data, create DataFrame, and clean it
current_df = create_df()
add_team_position_column(current_df)
clean_player_column(current_df)
add_pct_column(current_df)
add_timestamp_column(current_df)
# dump DataFrame to MySQL
if has_table('yahoo_buzz'):
# add data only if it hasn't been added already
last_df = create_df_from_last_dump()
if not compare_dfs(current_df, last_df):
dump_current_df_to_mysql(current_df)
else:
# create table and dump if 'yahoo_buzz' doesn't exist
dump_current_df_to_mysql(current_df)
# send an email if a player is buzzing
buzz_df = create_todays_buzz_df()
if not buzz_df.empty:
send_email(buzz_df)
dump_buzz_player_to_mysql(buzz_df)
toc = perf_counter()
duration = (toc - tic)
print(f"{pd.Timestamp('today'):%Y-%m-%d %I-%M %p} \
Finished in {duration:0.3f} seconds ")
| 5,347,790 |
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
    All numeric columns up until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns
| 5,347,791 |
def index():
"""
    Display the home page.
:return:
"""
return render_template('index.html')
| 5,347,792 |
def float2int_rz(x):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_float2int_rz.html
    :param x: Argument.
    :type x: float32
:rtype: int32
"""
| 5,347,793 |
def two_phase(model, config):
"""Two-phase simulation workflow."""
wea_path, datetime_stamps = get_wea(config)
smx = gen_smx(wea_path, config.smx_basis, config.mtxdir)
pdsmx = prep_2phase_pt(model, config)
vdsmx = prep_2phase_vu(model, config)
if not config.no_multiply:
calc_2phase_pt(model, datetime_stamps, pdsmx, smx, config)
calc_2phase_vu(datetime_stamps, vdsmx, smx, config)
return pdsmx, vdsmx
| 5,347,794 |
def str_with_tab(indent: int, text: str, uppercase: bool = True) -> str:
"""Create a string with ``indent`` spaces followed by ``text``."""
if uppercase:
text = text.upper()
return " " * indent + text
| 5,347,795 |
def delete(card, files=None):
"""Delete individual notefiles and their contents.
Args:
card (Notecard): The current Notecard object.
files (array): A list of Notefiles to delete.
Returns:
string: The result of the Notecard request.
"""
req = {"req": "file.delete"}
if files:
req["files"] = files
return card.Transaction(req)
| 5,347,796 |
def p_vars_1(p):
"""
vars : empty
"""
p[0] = []
| 5,347,797 |
def test_timed_info():
"""Test timed_info decorator"""
@timed_info
def target():
return "hello world"
result = target()
assert result == "hello world"
| 5,347,798 |
def get_doc_word_token_set(doc: Doc, use_lemma=False) -> Set[Token]:
"""Return the set of tokens in a document (no repetition)."""
return set([token.lemma_ if use_lemma else token.text for token in get_word_tokens(doc)])
| 5,347,799 |